xref: /haiku/src/add-ons/kernel/drivers/graphics/intel_extreme/intel_extreme.cpp (revision 52f7c9389475e19fc21487b38064b4390eeb6fea)
1 /*
2  * Copyright 2006-2018, Haiku, Inc. All Rights Reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *		Axel Dörfler, axeld@pinc-software.de
7  *		Alexander von Gluck IV, kallisti5@unixzen.com
8  *		Adrien Destugues, pulkomandy@pulkomandy.tk
9  */
10 
11 
12 #include "intel_extreme.h"
13 
14 #include <unistd.h>
15 #include <stdio.h>
16 #include <string.h>
17 #include <errno.h>
18 
19 #include <AreaKeeper.h>
20 #include <boot_item.h>
21 #include <driver_settings.h>
22 #include <util/kernel_cpp.h>
23 
24 #include <vesa_info.h>
25 
26 #include "driver.h"
27 #include "power.h"
28 #include "utility.h"
29 
30 
31 #define TRACE_INTELEXTREME
32 #ifdef TRACE_INTELEXTREME
33 #	define TRACE(x...) dprintf("intel_extreme: " x)
34 #else
35 #	define TRACE(x) ;
36 #endif
37 
38 #define ERROR(x...) dprintf("intel_extreme: " x)
39 #define CALLED(x...) TRACE("intel_extreme: CALLED %s\n", __PRETTY_FUNCTION__)
40 
41 
42 static void
43 init_overlay_registers(overlay_registers* _registers)
44 {
45 	user_memset(_registers, 0, B_PAGE_SIZE);
46 
47 	overlay_registers registers;
48 	memset(&registers, 0, sizeof(registers));
49 	registers.contrast_correction = 0x48;
50 	registers.saturation_cos_correction = 0x9a;
51 		// this by-passes contrast and saturation correction
52 
53 	user_memcpy(_registers, &registers, sizeof(overlay_registers));
54 }
55 
56 
57 static void
58 read_settings(bool &hardwareCursor)
59 {
60 	hardwareCursor = false;
61 
62 	void* settings = load_driver_settings("intel_extreme");
63 	if (settings != NULL) {
64 		hardwareCursor = get_driver_boolean_parameter(settings,
65 			"hardware_cursor", true, true);
66 
67 		unload_driver_settings(settings);
68 	}
69 }
70 
71 
72 static int32
73 release_vblank_sem(intel_info &info)
74 {
75 	int32 count;
76 	if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK
77 		&& count < 0) {
78 		release_sem_etc(info.shared_info->vblank_sem, -count,
79 			B_DO_NOT_RESCHEDULE);
80 		return B_INVOKE_SCHEDULER;
81 	}
82 
83 	return B_HANDLED_INTERRUPT;
84 }
85 
86 
87 static void
88 bdw_enable_interrupts(intel_info& info, pipe_index pipe, bool enable)
89 {
90 	ASSERT(pipe != INTEL_PIPE_ANY);
91 	ASSERT(info.device_type.Generation() >= 12 || pipe != INTEL_PIPE_D);
92 
93 	const uint32 regMask = PCH_INTERRUPT_PIPE_MASK_BDW(pipe);
94 	const uint32 regEnabled = PCH_INTERRUPT_PIPE_ENABLED_BDW(pipe);
95 	const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(pipe);
96 	const uint32 value = enable ? PCH_INTERRUPT_VBLANK_BDW : 0;
97 	write32(info, regIdentity, ~0);
98 	write32(info, regEnabled, value);
99 	write32(info, regMask, ~value);
100 }
101 
102 
103 static void
104 bdw_enable_global_interrupts(intel_info& info, bool enable)
105 {
106 	const uint32 bit = PCH_MASTER_INT_CTL_GLOBAL_BDW;
107 	const uint32 mask = enable ? bit : 0;
108 	const uint32 value = read32(info, PCH_MASTER_INT_CTL_BDW);
109 	write32(info, PCH_MASTER_INT_CTL_BDW, (value & ~bit) | mask);
110 }
111 
112 
113 /*!
114 	Checks interrupt status in master interrupt control register.
115 	For Gen8 to Gen11. From Gen11 the register is called DISPLAY_INT_CTL.
116 */
117 static bool
118 bdw_check_interrupt(intel_info& info, pipes& which)
119 {
120 	ASSERT(info.device_type.Generation() >= 12 || !which.HasPipe(INTEL_PIPE_D));
121 
122 	which.ClearPipe(INTEL_PIPE_ANY);
123 	const uint32 interrupt = read32(info, PCH_MASTER_INT_CTL_BDW);
124 	if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_A)) != 0)
125 		which.SetPipe(INTEL_PIPE_A);
126 	if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_B)) != 0)
127 		which.SetPipe(INTEL_PIPE_B);
128 	if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_C)) != 0)
129 		which.SetPipe(INTEL_PIPE_C);
130 	if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_D)) != 0)
131 		which.SetPipe(INTEL_PIPE_D);
132 	return which.HasPipe(INTEL_PIPE_ANY);
133 }
134 
135 
136 /*!
137 	Checks vblank interrupt status for a specified pipe.
138 	Gen8 to Gen12.
139 */
140 static bool
141 bdw_check_pipe_interrupt(intel_info& info, pipe_index pipe)
142 {
143 	ASSERT(pipe != INTEL_PIPE_ANY);
144 	ASSERT(info.device_type.Generation() >= 12 || pipe != INTEL_PIPE_D);
145 
146 	const uint32 identity = read32(info, PCH_INTERRUPT_PIPE_IDENTITY_BDW(pipe));
147 	return (identity & PCH_INTERRUPT_VBLANK_BDW) != 0;
148 }
149 
150 
151 static void
152 bdw_clear_pipe_interrupt(intel_info& info, pipe_index pipe)
153 {
154 	ASSERT(pipe != INTEL_PIPE_ANY);
155 	ASSERT(info.device_type.Generation() >= 12 || pipe != INTEL_PIPE_D);
156 
157 	const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(pipe);
158 	const uint32 identity = read32(info, regIdentity);
159 	write32(info, regIdentity, identity | PCH_INTERRUPT_VBLANK_BDW);
160 }
161 
162 
163 /** Get the appropriate interrupt mask for enabling or testing interrupts on
164  * the given pipe.
165  *
166  * The bits to test or set are different depending on the hardware generation.
167  *
168  * \param info Intel_extreme driver information
169  * \param pipe pipe to use
170  * \param enable true to get the mask for enabling the interrupts, false to get
171  *               the mask for testing them.
172  */
173 static uint32
174 intel_get_interrupt_mask(intel_info& info, pipe_index pipe, bool enable)
175 {
176 	uint32 mask = 0;
177 	bool hasPCH = info.pch_info != INTEL_PCH_NONE;
178 
179 	// Intel changed the PCH register mapping between Sandy Bridge and the
180 	// later generations (Ivy Bridge and up).
181 	// The PCH register itself does not exist in pre-PCH platforms, and the
182 	// previous interrupt register of course also had a different mapping.
183 
184 	if (pipe == INTEL_PIPE_A) {
185 		if (info.device_type.InGroup(INTEL_GROUP_SNB)
186 				|| info.device_type.InGroup(INTEL_GROUP_ILK))
187 			mask |= PCH_INTERRUPT_VBLANK_PIPEA_SNB;
188 		else if (hasPCH)
189 			mask |= PCH_INTERRUPT_VBLANK_PIPEA;
190 		else
191 			mask |= INTERRUPT_VBLANK_PIPEA;
192 	}
193 
194 	if (pipe == INTEL_PIPE_B) {
195 		if (info.device_type.InGroup(INTEL_GROUP_SNB)
196 				|| info.device_type.InGroup(INTEL_GROUP_ILK))
197 			mask |= PCH_INTERRUPT_VBLANK_PIPEB_SNB;
198 		else if (hasPCH)
199 			mask |= PCH_INTERRUPT_VBLANK_PIPEB;
200 		else
201 			mask |= INTERRUPT_VBLANK_PIPEB;
202 	}
203 
204 #if 0 // FIXME enable when we support the 3rd pipe
205 	if (pipe == INTEL_PIPE_C) {
206 		// Older generations only had two pipes
207 		if (hasPCH && info.device_type.Generation() > 6)
208 			mask |= PCH_INTERRUPT_VBLANK_PIPEC;
209 	}
210 #endif
211 
212 	// On SandyBridge, there is an extra "global enable" flag, which must also
213 	// be set when enabling the interrupts (but not when testing for them).
214 	if (enable && info.device_type.InFamily(INTEL_FAMILY_SER5))
215 		mask |= PCH_INTERRUPT_GLOBAL_SNB;
216 
217 	return mask;
218 }
219 
220 
221 static void
222 intel_enable_interrupts(intel_info& info, pipes which, bool enable)
223 {
224 	uint32 finalMask = 0;
225 	const uint32 pipeAMask = intel_get_interrupt_mask(info, INTEL_PIPE_A, true);
226 	const uint32 pipeBMask = intel_get_interrupt_mask(info, INTEL_PIPE_B, true);
227 	if (which.HasPipe(INTEL_PIPE_A))
228 		finalMask |= pipeAMask;
229 	if (which.HasPipe(INTEL_PIPE_B))
230 		finalMask |= pipeBMask;
231 
232 	const uint32 value = enable ? finalMask : 0;
233 
234 	// Clear all the interrupts
235 	write32(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);
236 
237 	// enable interrupts - we only want VBLANK interrupts
238 	write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), value);
239 	write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~value);
240 }
241 
242 
243 static bool
244 intel_check_interrupt(intel_info& info, pipes& which)
245 {
246 	which.ClearPipe(INTEL_PIPE_ANY);
247 	const uint32 pipeAMask = intel_get_interrupt_mask(info, INTEL_PIPE_A, false);
248 	const uint32 pipeBMask = intel_get_interrupt_mask(info, INTEL_PIPE_B, false);
249 	const uint32 regIdentity = find_reg(info, INTEL_INTERRUPT_IDENTITY);
250 	const uint32 interrupt = read32(info, regIdentity);
251 	if ((interrupt & pipeAMask) != 0)
252 		which.SetPipe(INTEL_PIPE_A);
253 	if ((interrupt & pipeBMask) != 0)
254 		which.SetPipe(INTEL_PIPE_B);
255 	return which.HasPipe(INTEL_PIPE_ANY);
256 }
257 
258 
259 static void
260 g35_clear_interrupt_status(intel_info& info, pipe_index pipe)
261 {
262 	// These registers do not exist on later GPUs.
263 	if (info.device_type.Generation() > 4)
264 		return;
265 
266 	const uint32 value = DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED;
267 	switch (pipe) {
268 		case INTEL_PIPE_A:
269 			write32(info, INTEL_DISPLAY_A_PIPE_STATUS, value);
270 			break;
271 		case INTEL_PIPE_B:
272 			write32(info, INTEL_DISPLAY_B_PIPE_STATUS, value);
273 			break;
274 		default:
275 			break;
276 	}
277 }
278 
279 
280 static void
281 intel_clear_pipe_interrupt(intel_info& info, pipe_index pipe)
282 {
283 	// On G35/G45, prior to clearing Display Pipe interrupt in IIR
284 	// the corresponding interrupt status must first be cleared.
285 	g35_clear_interrupt_status(info, pipe);
286 
287 	const uint32 regIdentity = find_reg(info, INTEL_INTERRUPT_IDENTITY);
288 	const uint32 bit = intel_get_interrupt_mask(info, pipe, false);
289 	const uint32 identity = read32(info, regIdentity);
290 	write32(info, regIdentity, identity | bit);
291 }
292 
293 
/*!
	Interrupt routine for Gen8 and Gen9.

	Disables the master interrupt, drains every pending per-pipe vblank
	interrupt (waking waiters on the shared vblank semaphore), then
	re-enables the master interrupt before returning.

	TODO: Gen11 and 12 require one additional check at the beginning.
	See Gen12 Display Engine: Interrupt Service Routine chapter.
*/
static int32
bdw_interrupt_handler(void* data)
{
	intel_info& info = *(intel_info*)data;

	// block further interrupts while we process this one
	bdw_enable_global_interrupts(info, false);

	pipes which;
	bool shouldHandle = bdw_check_interrupt(info, which);

	if (!shouldHandle) {
		// not our interrupt; restore the master enable and pass it on
		bdw_enable_global_interrupts(info, true);
		return B_UNHANDLED_INTERRUPT;
	}

	int32 handled = B_HANDLED_INTERRUPT;

	// keep draining until no pipe reports a pending interrupt anymore
	while (shouldHandle) {
		if (which.HasPipe(INTEL_PIPE_A) && bdw_check_pipe_interrupt(info, INTEL_PIPE_A)) {
			handled = release_vblank_sem(info);

			bdw_clear_pipe_interrupt(info, INTEL_PIPE_A);
		}

		if (which.HasPipe(INTEL_PIPE_B) && bdw_check_pipe_interrupt(info, INTEL_PIPE_B)) {
			handled = release_vblank_sem(info);

			bdw_clear_pipe_interrupt(info, INTEL_PIPE_B);
		}

		if (which.HasPipe(INTEL_PIPE_C) && bdw_check_pipe_interrupt(info, INTEL_PIPE_C)) {
			handled = release_vblank_sem(info);

			bdw_clear_pipe_interrupt(info, INTEL_PIPE_C);
		}

		shouldHandle = bdw_check_interrupt(info, which);
	}

	bdw_enable_global_interrupts(info, true);
	return handled;
}
341 
342 
/*!	Interrupt routine for pre-Gen8 hardware.

	Drains every pending per-pipe vblank interrupt, waking waiters on
	the shared vblank semaphore and acknowledging each interrupt, until
	no more are reported.
*/
static int32
intel_interrupt_handler(void* data)
{
	intel_info &info = *(intel_info*)data;

	pipes which;
	bool shouldHandle = intel_check_interrupt(info, which);

	if (!shouldHandle)
		return B_UNHANDLED_INTERRUPT;

	int32 handled = B_HANDLED_INTERRUPT;

	// keep draining until no pipe reports a pending interrupt anymore
	while (shouldHandle) {
		if (which.HasPipe(INTEL_PIPE_A)) {
			handled = release_vblank_sem(info);

			intel_clear_pipe_interrupt(info, INTEL_PIPE_A);
		}

		if (which.HasPipe(INTEL_PIPE_B)) {
			handled = release_vblank_sem(info);

			intel_clear_pipe_interrupt(info, INTEL_PIPE_B);
		}

#if 0
		// FIXME we don't have support for the 3rd pipe yet
		if (which.HasPipe(INTEL_PIPE_C)) {
			handled = release_vblank_sem(info);

			intel_clear_pipe_interrupt(info, INTEL_PIPE_C);
		}
#endif

		shouldHandle = intel_check_interrupt(info, which);
	}

	return handled;
}
383 
384 
385 static void
386 init_interrupt_handler(intel_info &info)
387 {
388 	info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank");
389 	if (info.shared_info->vblank_sem < B_OK)
390 		return;
391 
392 	status_t status = B_OK;
393 
394 	// We need to change the owner of the sem to the calling team (usually the
395 	// app_server), because userland apps cannot acquire kernel semaphores
396 	thread_id thread = find_thread(NULL);
397 	thread_info threadInfo;
398 	if (get_thread_info(thread, &threadInfo) != B_OK
399 		|| set_sem_owner(info.shared_info->vblank_sem, threadInfo.team)
400 			!= B_OK) {
401 		status = B_ERROR;
402 	}
403 
404 	// Find the right interrupt vector, using MSIs if available.
405 	info.irq = 0xff;
406 	info.use_msi = false;
407 	if (info.pci->u.h0.interrupt_pin != 0x00)
408 		info.irq = info.pci->u.h0.interrupt_line;
409 	if (gPCIx86Module != NULL && gPCIx86Module->get_msi_count(info.pci->bus,
410 			info.pci->device, info.pci->function) >= 1) {
411 		uint8 msiVector = 0;
412 		if (gPCIx86Module->configure_msi(info.pci->bus, info.pci->device,
413 				info.pci->function, 1, &msiVector) == B_OK
414 			&& gPCIx86Module->enable_msi(info.pci->bus, info.pci->device,
415 				info.pci->function) == B_OK) {
416 			TRACE("using message signaled interrupts\n");
417 			info.irq = msiVector;
418 			info.use_msi = true;
419 		}
420 	}
421 
422 	if (status == B_OK && info.irq != 0xff) {
423 		// we've gotten an interrupt line for us to use
424 
425 		info.fake_interrupts = false;
426 
427 		if (info.device_type.Generation() >= 8) {
428 			status = install_io_interrupt_handler(info.irq,
429 				&bdw_interrupt_handler, (void*)&info, 0);
430 			if (status == B_OK) {
431 				bdw_enable_global_interrupts(info, true);
432 				bdw_enable_interrupts(info, INTEL_PIPE_A, true);
433 				bdw_enable_interrupts(info, INTEL_PIPE_B, true);
434 			}
435 		} else {
436 			status = install_io_interrupt_handler(info.irq,
437 				&intel_interrupt_handler, (void*)&info, 0);
438 			if (status == B_OK) {
439 				g35_clear_interrupt_status(info, INTEL_PIPE_A);
440 				g35_clear_interrupt_status(info, INTEL_PIPE_B);
441 
442 				pipes which;
443 				which.SetPipe(INTEL_PIPE_A);
444 				which.SetPipe(INTEL_PIPE_B);
445 				intel_enable_interrupts(info, which, true);
446 			}
447 		}
448 	}
449 	if (status < B_OK) {
450 		// There is no interrupt reserved for us, or we couldn't install our
451 		// interrupt handler, let's fake the vblank interrupt for our clients
452 		// using a timer interrupt
453 		info.fake_interrupts = true;
454 
455 		// TODO: fake interrupts!
456 		ERROR("Fake interrupt mode (no PCI interrupt line assigned\n");
457 		status = B_ERROR;
458 	}
459 
460 	if (status < B_OK) {
461 		delete_sem(info.shared_info->vblank_sem);
462 		info.shared_info->vblank_sem = B_ERROR;
463 	}
464 }
465 
466 
467 //	#pragma mark -
468 
469 
470 status_t
471 intel_free_memory(intel_info &info, addr_t base)
472 {
473 	return gGART->free_memory(info.aperture, base);
474 }
475 
476 
477 status_t
478 intel_allocate_memory(intel_info &info, size_t size, size_t alignment,
479 	uint32 flags, addr_t* _base, phys_addr_t* _physicalBase)
480 {
481 	return gGART->allocate_memory(info.aperture, size, alignment,
482 		flags, _base, _physicalBase);
483 }
484 
485 
/*!	Main hardware initialization: maps the GART aperture and the MMIO
	registers, creates and fills the shared_info area used by the
	accelerant, sets up the register block offsets for the detected
	architecture, allocates ring buffer / overlay / status page / cursor
	memory, and installs the vblank interrupt handler.

	\return B_OK on success, or the error of the failed mapping step.
		On error paths, already-acquired resources are released (the
		AreaKeeper objects delete their areas unless Detach()ed).
*/
status_t
intel_extreme_init(intel_info &info)
{
	CALLED();
	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
		info.pci->function, 0, &info.aperture_base);
	if (info.aperture < B_OK) {
		ERROR("error: could not map GART aperture! (%s)\n",
			strerror(info.aperture));
		return info.aperture;
	}

	// Shared area visible to the accelerant; extra pages beyond the
	// struct itself are part of the reserved layout.
	AreaKeeper sharedCreator;
	info.shared_area = sharedCreator.Create("intel extreme shared info",
		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
		B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA);
	if (info.shared_area < B_OK) {
		ERROR("error: could not create shared area!\n");
		gGART->unmap_aperture(info.aperture);
		return info.shared_area;
	}

	// enable power
	gPCI->set_powerstate(info.pci->bus, info.pci->device, info.pci->function,
		PCI_pm_state_d0);

	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));

	int mmioIndex = 1;
	if (info.device_type.Generation() >= 3) {
		// For some reason Intel saw the need to change the order of the
		// mappings with the introduction of the i9xx family
		mmioIndex = 0;
	}

	// evaluate driver settings, if any

	bool hardwareCursor;
	read_settings(hardwareCursor);

	// memory mapped I/O

	// TODO: registers are mapped twice (by us and intel_gart), maybe we
	// can share it between the drivers

	// Assemble the (possibly 64-bit) MMIO BAR address and size.
	phys_addr_t addr = info.pci->u.h0.base_registers[mmioIndex];
	uint64 barSize = info.pci->u.h0.base_register_sizes[mmioIndex];
	if ((info.pci->u.h0.base_register_flags[mmioIndex] & PCI_address_type) == PCI_address_type_64) {
		addr |= (uint64)info.pci->u.h0.base_registers[mmioIndex + 1] << 32;
		barSize |= (uint64)info.pci->u.h0.base_register_sizes[mmioIndex + 1] << 32;
	}
	AreaKeeper mmioMapper;
	info.registers_area = mmioMapper.Map("intel extreme mmio", addr, barSize,
		B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA,
		(void**)&info.registers);
	if (mmioMapper.InitCheck() < B_OK) {
		ERROR("error: could not map memory I/O!\n");
		gGART->unmap_aperture(info.aperture);
		return info.registers_area;
	}

	bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

	ERROR("Init Intel generation %d GPU %s PCH split.\n",
		info.device_type.Generation(), hasPCH ? "with" : "without");

	uint32* blocks = info.shared_info->register_blocks;
	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;

	// setup the register blocks for the different architectures
	if (hasPCH) {
		// PCH based platforms (IronLake through ultra-low-power Broadwells)
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= PCH_NORTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= PCH_SOUTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
	} else {
		// (G)MCH/ICH based platforms
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= MCH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= MCH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= MCH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= ICH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= ICH_PORT_REGISTER_BASE;
	}

	// Everything in the display PRM gets +0x180000
	if (info.device_type.InGroup(INTEL_GROUP_VLV)) {
		// "I nearly got violent with the hw guys when they told me..."
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] += VLV_DISPLAY_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] += VLV_DISPLAY_BASE;
	}

	TRACE("REGS_NORTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]);
	TRACE("REGS_NORTH_PIPE_AND_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]);
	TRACE("REGS_NORTH_PLANE_CONTROL: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]);
	TRACE("REGS_SOUTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]);
	TRACE("REGS_SOUTH_TRANSCODER_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]);

	// make sure bus master, memory-mapped I/O, and frame buffer is enabled
	set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci,
		PCI_command, 2) | PCI_command_io | PCI_command_memory
		| PCI_command_master);

	// reserve ring buffer memory (currently, this memory is placed in
	// the graphics memory), but this could bring us problems with
	// write combining...

	ring_buffer &primary = info.shared_info->primary_ring_buffer;
	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
			(addr_t*)&primary.base) == B_OK) {
		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
		primary.size = 16 * B_PAGE_SIZE;
		primary.offset = (addr_t)primary.base - info.aperture_base;
	}

	// Enable clock gating
	intel_en_gating(info);

	// Enable automatic gpu downclocking if we can to save power
	intel_en_downclock(info);

	// no errors, so keep areas and mappings
	sharedCreator.Detach();
	mmioMapper.Detach();

	aperture_info apertureInfo;
	gGART->get_aperture_info(info.aperture, &apertureInfo);

	info.shared_info->registers_area = info.registers_area;
	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
	info.shared_info->graphics_memory_size = apertureInfo.size;
	info.shared_info->frame_buffer = 0;
	info.shared_info->dpms_mode = B_DPMS_ON;

	// Pull VBIOS panel mode for later use
	info.shared_info->got_vbt = get_lvds_mode_from_bios(
		&info.shared_info->panel_timing);

	/* at least 855gm can't drive more than one head at time */
	if (info.device_type.InFamily(INTEL_FAMILY_8xx))
		info.shared_info->single_head_locked = 1;

	// PLL reference/min/max frequencies per hardware family
	if (info.device_type.InFamily(INTEL_FAMILY_SER5)) {
		info.shared_info->pll_info.reference_frequency = 120000;// 120 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else if (info.device_type.InFamily(INTEL_FAMILY_9xx)) {
		info.shared_info->pll_info.reference_frequency = 96000;	// 96 MHz
		info.shared_info->pll_info.max_frequency = 400000;
			// 400 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else if (info.device_type.HasDDI() && (info.device_type.Generation() <= 8)) {
		info.shared_info->pll_info.reference_frequency = 135000;// 135 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	} else if ((info.device_type.Generation() == 9) &&
				info.device_type.InGroup(INTEL_GROUP_SKY)) {
		info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	} else if (info.device_type.Generation() == 9) {
		// NOTE(review): this branch reads ICL_DSSM (an Ice Lake era
		// register) while matching Generation() == 9 — confirm the
		// intended generation gate against the hardware documentation.
		uint32 refInfo =
			(read32(info, ICL_DSSM) & ICL_DSSM_REF_FREQ_MASK) >> ICL_DSSM_REF_FREQ_SHIFT;
		switch (refInfo) {
			case ICL_DSSM_24000:
				info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
				break;
			case ICL_DSSM_19200:
				info.shared_info->pll_info.reference_frequency = 19200;	// 19.2 MHz
				break;
			case ICL_DSSM_38400:
				info.shared_info->pll_info.reference_frequency = 38400;	// 38.4 MHz
				break;
			default:
				ERROR("error: unknown ref. freq. strap, using 24Mhz! %" B_PRIx32 "\n", refInfo);
				info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
				break;
		}
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	} else {
		info.shared_info->pll_info.reference_frequency = 48000;	// 48 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	}

	info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0;

	info.shared_info->pch_info = info.pch_info;

	info.shared_info->device_type = info.device_type;
#ifdef __HAIKU__
	strlcpy(info.shared_info->device_identifier, info.device_identifier,
		sizeof(info.shared_info->device_identifier));
#else
	strcpy(info.shared_info->device_identifier, info.device_identifier);
#endif

	// setup overlay registers

	status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0,
		intel_uses_physical_overlay(*info.shared_info)
				? B_APERTURE_NEED_PHYSICAL : 0,
		(addr_t*)&info.overlay_registers,
		&info.shared_info->physical_overlay_registers);
	if (status == B_OK) {
		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
			- info.aperture_base;
		TRACE("Overlay registers mapped at 0x%" B_PRIx32 " = %p - %"
			B_PRIxADDR " (%" B_PRIxPHYSADDR ")\n",
			info.shared_info->overlay_offset, info.overlay_registers,
			info.aperture_base, info.shared_info->physical_overlay_registers);
		init_overlay_registers(info.overlay_registers);
	} else {
		ERROR("error: could not allocate overlay memory! %s\n",
			strerror(status));
	}

	// Allocate hardware status page and the cursor memory
	TRACE("Allocating hardware status page");

	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)info.shared_info->status_page,
			&info.shared_info->physical_status_page) == B_OK) {
		// TODO: set status page
	}
	if (hardwareCursor) {
		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->cursor_memory,
			&info.shared_info->physical_cursor_memory);
	}

	// Copy any EDID info the boot loader picked up via VESA.
	edid1_info* edidInfo = (edid1_info*)get_boot_item(VESA_EDID_BOOT_INFO,
		NULL);
	if (edidInfo != NULL) {
		info.shared_info->has_vesa_edid_info = true;
		memcpy(&info.shared_info->vesa_edid_info, edidInfo, sizeof(edid1_info));
	}

	init_interrupt_handler(info);

	// Determine the FDI link frequency (PCH platforms only).
	if (hasPCH) {
		if (info.device_type.Generation() == 5) {
			info.shared_info->fdi_link_frequency = (read32(info, FDI_PLL_BIOS_0)
				& FDI_PLL_FB_CLOCK_MASK) + 2;
			info.shared_info->fdi_link_frequency *= 100;
		} else {
			info.shared_info->fdi_link_frequency = 2700;
		}
	} else {
		info.shared_info->fdi_link_frequency = 0;
	}

	TRACE("%s: completed successfully!\n", __func__);
	return B_OK;
}
767 
768 
/*!	Tears down what intel_extreme_init() set up: disables interrupt
	generation, removes the installed interrupt handler, releases any
	configured MSI vector, and deletes the aperture mapping and the
	kernel areas.
*/
void
intel_extreme_uninit(intel_info &info)
{
	CALLED();

	if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) {
		// disable interrupt generation
		if (info.device_type.Generation() >= 8) {
			bdw_enable_global_interrupts(info, false);
			remove_io_interrupt_handler(info.irq, bdw_interrupt_handler, &info);
		} else {
			// clear the enable register and mask everything
			write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
			write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);
			remove_io_interrupt_handler(info.irq, intel_interrupt_handler, &info);
		}

		if (info.use_msi && gPCIx86Module != NULL) {
			// release the MSI vector configured in init_interrupt_handler()
			gPCIx86Module->disable_msi(info.pci->bus,
				info.pci->device, info.pci->function);
			gPCIx86Module->unconfigure_msi(info.pci->bus,
				info.pci->device, info.pci->function);
		}
	}

	gGART->unmap_aperture(info.aperture);

	delete_area(info.registers_area);
	delete_area(info.shared_area);
}
798 
799