xref: /haiku/src/add-ons/kernel/drivers/graphics/intel_extreme/intel_extreme.cpp (revision ab3c8bea07449c2305e9b9ad10421e316ce1d453)
1 /*
2  * Copyright 2006-2018, Haiku, Inc. All Rights Reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *		Axel Dörfler, axeld@pinc-software.de
7  *		Alexander von Gluck IV, kallisti5@unixzen.com
8  *		Adrien Destugues, pulkomandy@pulkomandy.tk
9  */
10 
11 
12 #include "intel_extreme.h"
13 
14 #include <unistd.h>
15 #include <stdio.h>
16 #include <string.h>
17 #include <errno.h>
18 
19 #include <AreaKeeper.h>
20 #include <boot_item.h>
21 #include <driver_settings.h>
22 #include <util/kernel_cpp.h>
23 
24 #include <vesa_info.h>
25 
26 #include "driver.h"
27 #include "power.h"
28 #include "utility.h"
29 
30 
31 #define TRACE_INTELEXTREME
32 #ifdef TRACE_INTELEXTREME
33 #	define TRACE(x...) dprintf("intel_extreme: " x)
34 #else
35 #	define TRACE(x...) ;
36 #endif
37 
38 #define ERROR(x...) dprintf("intel_extreme: " x)
39 #define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)
40 
41 
42 static void
43 init_overlay_registers(overlay_registers* _registers)
44 {
45 	user_memset(_registers, 0, B_PAGE_SIZE);
46 
47 	overlay_registers registers;
48 	memset(&registers, 0, sizeof(registers));
49 	registers.contrast_correction = 0x48;
50 	registers.saturation_cos_correction = 0x9a;
51 		// this by-passes contrast and saturation correction
52 
53 	user_memcpy(_registers, &registers, sizeof(overlay_registers));
54 }
55 
56 
57 static void
58 read_settings(bool &hardwareCursor)
59 {
60 	hardwareCursor = false;
61 
62 	void* settings = load_driver_settings("intel_extreme");
63 	if (settings != NULL) {
64 		hardwareCursor = get_driver_boolean_parameter(settings,
65 			"hardware_cursor", true, true);
66 
67 		unload_driver_settings(settings);
68 	}
69 }
70 
71 
72 static int32
73 release_vblank_sem(intel_info &info)
74 {
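	// get_sem_count() reports a negative count while threads are blocked on
	// the semaphore; release exactly that many, so every waiter is woken for
	// this vertical blank.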
75 	int32 count;
76 	if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK
77 		&& count < 0) {
78 		release_sem_etc(info.shared_info->vblank_sem, -count,
79 			B_DO_NOT_RESCHEDULE);
80 		return B_INVOKE_SCHEDULER;
81 	}
82 
83 	return B_HANDLED_INTERRUPT;
84 }
85 
86 
87 static void
88 gen8_enable_interrupts(intel_info& info, pipe_index pipe, bool enable)
89 {
90 	ASSERT(pipe != INTEL_PIPE_ANY);
91 	ASSERT(info.device_type.Generation() >= 12 || pipe != INTEL_PIPE_D);
92 
93 	const uint32 regMask = PCH_INTERRUPT_PIPE_MASK_BDW(pipe);
94 	const uint32 regEnabled = PCH_INTERRUPT_PIPE_ENABLED_BDW(pipe);
95 	const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(pipe);
96 	const uint32 value = enable ? PCH_INTERRUPT_VBLANK_BDW : 0;
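	// Acknowledge all pending bits in the identity register, program the
	// enabled register, then unmask only the requested bits in the mask
	// register.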
97 	write32(info, regIdentity, ~0);
98 	write32(info, regEnabled, value);
99 	write32(info, regMask, ~value);
100 }
101 
102 
103 static uint32
104 gen11_enable_global_interrupts(intel_info& info, bool enable)
105 {
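	// When disabling, return the still-pending master IRQ bits so the caller
	// can dispatch them; when enabling there is nothing to report.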
106 	write32(info, GEN11_GFX_MSTR_IRQ, enable ? GEN11_MASTER_IRQ : 0);
107 	return enable ? 0 : read32(info, GEN11_GFX_MSTR_IRQ);
108 }
109 
110 
111 static uint32
112 gen8_enable_global_interrupts(intel_info& info, bool enable)
113 {
114 	write32(info, PCH_MASTER_INT_CTL_BDW, enable ? PCH_MASTER_INT_CTL_GLOBAL_BDW : 0);
115 	return enable ? 0 : read32(info, PCH_MASTER_INT_CTL_BDW);
116 }
117 
118 
119 /*!
120 	Checks the interrupt status using the provided master interrupt control value.
121 	For Gen8 to Gen11.
122 */
123 static int32
124 gen8_handle_interrupts(intel_info& info, uint32 interrupt)
125 {
126 	int32 handled = B_HANDLED_INTERRUPT;
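	// For every pipe flagged in the master interrupt register, read that
	// pipe's identity register, wake any vblank waiters, and acknowledge the
	// handled bit by writing it back.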
127 	if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_A)) != 0) {
128 		const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(INTEL_PIPE_A);
129 		uint32 identity = read32(info, regIdentity);
130 		if ((identity & PCH_INTERRUPT_VBLANK_BDW) != 0) {
131 			handled = release_vblank_sem(info);
132 			write32(info, regIdentity, identity | PCH_INTERRUPT_VBLANK_BDW);
133 		} else {
134 			dprintf("gen8_handle_interrupts unhandled interrupt on pipe A\n");
135 		}
136 		interrupt &= ~PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_A);
137 	}
138 	if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_B)) != 0) {
139 		const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(INTEL_PIPE_B);
140 		uint32 identity = read32(info, regIdentity);
141 		if ((identity & PCH_INTERRUPT_VBLANK_BDW) != 0) {
142 			handled = release_vblank_sem(info);
143 			write32(info, regIdentity, identity | PCH_INTERRUPT_VBLANK_BDW);
144 		} else {
145 			dprintf("gen8_handle_interrupts unhandled interrupt on pipe B\n");
146 		}
147 		interrupt &= ~PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_B);
148 	}
149 	if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_C)) != 0) {
150 		const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(INTEL_PIPE_C);
151 		uint32 identity = read32(info, regIdentity);
152 		if ((identity & PCH_INTERRUPT_VBLANK_BDW) != 0) {
153 			handled = release_vblank_sem(info);
154 			write32(info, regIdentity, identity | PCH_INTERRUPT_VBLANK_BDW);
155 		} else {
156 			dprintf("gen8_handle_interrupts unhandled interrupt on pipe C\n");
157 		}
158 		interrupt &= ~PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_C);
159 	}
160 
161 	uint32 iir = 0;
162 	if ((interrupt & GEN8_DE_PORT_IRQ) != 0) {
163 		iir = read32(info, GEN8_DE_PORT_IIR);
164 		if (iir != 0) {
165 			write32(info, GEN8_DE_PORT_IIR, iir);
166 		}
167 		interrupt &= ~GEN8_DE_PORT_IRQ;
168 	}
169 
170 	interrupt &= ~PCH_MASTER_INT_CTL_GLOBAL_BDW;
171 	if (interrupt != 0)
172 		dprintf("gen8_handle_interrupts unhandled %" B_PRIx32 "\n", interrupt);
173 	return handled;
174 }
175 
176 
177 
178 /** Get the appropriate interrupt mask for enabling or testing interrupts on
179  * the given pipe.
180  *
181  * The bits to test or set are different depending on the hardware generation.
182  *
183  * \param info Intel_extreme driver information
184  * \param pipe pipe to use
185  * \param enable true to get the mask for enabling the interrupts, false to get
186  *               the mask for testing them.
187  */
188 static uint32
189 intel_get_interrupt_mask(intel_info& info, pipe_index pipe, bool enable)
190 {
191 	uint32 mask = 0;
192 	bool hasPCH = info.pch_info != INTEL_PCH_NONE;
193 
194 	// Intel changed the PCH register mapping between Sandy Bridge and the
195 	// later generations (Ivy Bridge and up).
196 	// The PCH register itself does not exist in pre-PCH platforms, and the
197 	// previous interrupt register of course also had a different mapping.
198 
199 	if (pipe == INTEL_PIPE_A) {
200 		if (info.device_type.InGroup(INTEL_GROUP_SNB)
201 				|| info.device_type.InGroup(INTEL_GROUP_ILK))
202 			mask |= PCH_INTERRUPT_VBLANK_PIPEA_SNB;
203 		else if (hasPCH)
204 			mask |= PCH_INTERRUPT_VBLANK_PIPEA;
205 		else
206 			mask |= INTERRUPT_VBLANK_PIPEA;
207 	}
208 
209 	if (pipe == INTEL_PIPE_B) {
210 		if (info.device_type.InGroup(INTEL_GROUP_SNB)
211 				|| info.device_type.InGroup(INTEL_GROUP_ILK))
212 			mask |= PCH_INTERRUPT_VBLANK_PIPEB_SNB;
213 		else if (hasPCH)
214 			mask |= PCH_INTERRUPT_VBLANK_PIPEB;
215 		else
216 			mask |= INTERRUPT_VBLANK_PIPEB;
217 	}
218 
219 #if 0 // FIXME enable when we support the 3rd pipe
220 	if (pipe == INTEL_PIPE_C) {
221 		// Older generations only had two pipes
222 		if (hasPCH && info.device_type.Generation() > 6)
223 			mask |= PCH_INTERRUPT_VBLANK_PIPEC;
224 	}
225 #endif
226 
227 	// On SandyBridge, there is an extra "global enable" flag, which must also
228 	// be set when enabling the interrupts (but not when testing for them).
229 	if (enable && info.device_type.InFamily(INTEL_FAMILY_SER5))
230 		mask |= PCH_INTERRUPT_GLOBAL_SNB;
231 
232 	return mask;
233 }
234 
235 
236 static void
237 intel_enable_interrupts(intel_info& info, pipes which, bool enable)
238 {
239 	uint32 finalMask = 0;
240 	const uint32 pipeAMask = intel_get_interrupt_mask(info, INTEL_PIPE_A, true);
241 	const uint32 pipeBMask = intel_get_interrupt_mask(info, INTEL_PIPE_B, true);
242 	if (which.HasPipe(INTEL_PIPE_A))
243 		finalMask |= pipeAMask;
244 	if (which.HasPipe(INTEL_PIPE_B))
245 		finalMask |= pipeBMask;
246 
247 	const uint32 value = enable ? finalMask : 0;
248 
249 	// Clear all the interrupts
250 	write32(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);
251 
252 	// enable interrupts - we only want VBLANK interrupts
253 	write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), value);
254 	write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~value);
255 }
256 
257 
258 static bool
259 intel_check_interrupt(intel_info& info, pipes& which)
260 {
261 	which.ClearPipe(INTEL_PIPE_ANY);
262 	const uint32 pipeAMask = intel_get_interrupt_mask(info, INTEL_PIPE_A, false);
263 	const uint32 pipeBMask = intel_get_interrupt_mask(info, INTEL_PIPE_B, false);
264 	const uint32 regIdentity = find_reg(info, INTEL_INTERRUPT_IDENTITY);
265 	const uint32 interrupt = read32(info, regIdentity);
266 	if ((interrupt & pipeAMask) != 0)
267 		which.SetPipe(INTEL_PIPE_A);
268 	if ((interrupt & pipeBMask) != 0)
269 		which.SetPipe(INTEL_PIPE_B);
270 	return which.HasPipe(INTEL_PIPE_ANY);
271 }
272 
273 
274 static void
275 g35_clear_interrupt_status(intel_info& info, pipe_index pipe)
276 {
277 	// These registers do not exist on later GPUs.
278 	if (info.device_type.Generation() > 4)
279 		return;
280 
281 	const uint32 value = DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED;
282 	switch (pipe) {
283 		case INTEL_PIPE_A:
284 			write32(info, INTEL_DISPLAY_A_PIPE_STATUS, value);
285 			break;
286 		case INTEL_PIPE_B:
287 			write32(info, INTEL_DISPLAY_B_PIPE_STATUS, value);
288 			break;
289 		default:
290 			break;
291 	}
292 }
293 
294 
295 static void
296 intel_clear_pipe_interrupt(intel_info& info, pipe_index pipe)
297 {
298 	// On G35/G45, the corresponding pipe interrupt status must be cleared
299 	// before the Display Pipe interrupt can be cleared in the IIR.
300 	g35_clear_interrupt_status(info, pipe);
301 
302 	const uint32 regIdentity = find_reg(info, INTEL_INTERRUPT_IDENTITY);
303 	const uint32 bit = intel_get_interrupt_mask(info, pipe, false);
304 	const uint32 identity = read32(info, regIdentity);
305 	write32(info, regIdentity, identity | bit);
306 }
307 
308 
309 /*!
310 	Interrupt routine for Gen8 and Gen9.
311 	See Gen12 Display Engine: Interrupt Service Routine chapter.
312 */
313 static int32
314 gen8_interrupt_handler(void* data)
315 {
316 	intel_info& info = *(intel_info*)data;
317 
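	// Disabling the master interrupt control both blocks further interrupts
	// and returns the bits that are currently pending; it is re-enabled once
	// those sources have been handled.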
318 	uint32 interrupt = gen8_enable_global_interrupts(info, false);
319 	if (interrupt == 0) {
320 		gen8_enable_global_interrupts(info, true);
321 		return B_UNHANDLED_INTERRUPT;
322 	}
323 
324 	int32 handled = gen8_handle_interrupts(info, interrupt);
325 
326 	gen8_enable_global_interrupts(info, true);
327 	return handled;
328 }
329 
330 
331 /*!
332 	Interrupt routine for Gen11.
333 	See Gen12 Display Engine: Interrupt Service Routine chapter.
334 */
335 static int32
336 gen11_interrupt_handler(void* data)
337 {
338 	intel_info& info = *(intel_info*)data;
339 
340 	uint32 interrupt = gen11_enable_global_interrupts(info, false);
341 
342 	if (interrupt == 0) {
343 		gen11_enable_global_interrupts(info, true);
344 		return B_UNHANDLED_INTERRUPT;
345 	}
346 
347 	int32 handled = B_HANDLED_INTERRUPT;
348 	if ((interrupt & GEN11_DISPLAY_IRQ) != 0)
349 		handled = gen8_handle_interrupts(info, read32(info, GEN11_DISPLAY_INT_CTL));
350 
351 	gen11_enable_global_interrupts(info, true);
352 	return handled;
353 }
354 
355 
356 static int32
357 intel_interrupt_handler(void* data)
358 {
359 	intel_info &info = *(intel_info*)data;
360 
361 	pipes which;
362 	bool shouldHandle = intel_check_interrupt(info, which);
363 
364 	if (!shouldHandle)
365 		return B_UNHANDLED_INTERRUPT;
366 
367 	int32 handled = B_HANDLED_INTERRUPT;
368 
369 	while (shouldHandle) {
370 		if (which.HasPipe(INTEL_PIPE_A)) {
371 			handled = release_vblank_sem(info);
372 
373 			intel_clear_pipe_interrupt(info, INTEL_PIPE_A);
374 		}
375 
376 		if (which.HasPipe(INTEL_PIPE_B)) {
377 			handled = release_vblank_sem(info);
378 
379 			intel_clear_pipe_interrupt(info, INTEL_PIPE_B);
380 		}
381 
382 #if 0
383 		// FIXME we don't have support for the 3rd pipe yet
384 		if (which.HasPipe(INTEL_PIPE_C)) {
385 			handled = release_vblank_sem(info);
386 
387 			intel_clear_pipe_interrupt(info, INTEL_PIPE_C);
388 		}
389 #endif
390 
391 		shouldHandle = intel_check_interrupt(info, which);
392 	}
393 
394 	return handled;
395 }
396 
397 
398 static void
399 init_interrupt_handler(intel_info &info)
400 {
401 	info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank");
402 	if (info.shared_info->vblank_sem < B_OK)
403 		return;
404 
405 	status_t status = B_OK;
406 
407 	// We need to change the owner of the sem to the calling team (usually the
408 	// app_server), because userland apps cannot acquire kernel semaphores
409 	thread_id thread = find_thread(NULL);
410 	thread_info threadInfo;
411 	if (get_thread_info(thread, &threadInfo) != B_OK
412 		|| set_sem_owner(info.shared_info->vblank_sem, threadInfo.team)
413 			!= B_OK) {
414 		status = B_ERROR;
415 	}
416 
417 	// Find the right interrupt vector, using MSIs if available.
418 	info.irq = 0xff;
419 	info.use_msi = false;
420 	if (info.pci->u.h0.interrupt_pin != 0x00)
421 		info.irq = info.pci->u.h0.interrupt_line;
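	// Prefer a message signaled interrupt over the legacy interrupt line when
	// the PCI bus manager reports MSI support for this device.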
422 	if (gPCIx86Module != NULL && gPCIx86Module->get_msi_count(info.pci->bus,
423 			info.pci->device, info.pci->function) >= 1) {
424 		uint8 msiVector = 0;
425 		if (gPCIx86Module->configure_msi(info.pci->bus, info.pci->device,
426 				info.pci->function, 1, &msiVector) == B_OK
427 			&& gPCIx86Module->enable_msi(info.pci->bus, info.pci->device,
428 				info.pci->function) == B_OK) {
429 			TRACE("using message signaled interrupts\n");
430 			info.irq = msiVector;
431 			info.use_msi = true;
432 		}
433 	}
434 
435 	if (status == B_OK && info.irq != 0xff) {
436 		// we've gotten an interrupt line for us to use
437 
438 		info.fake_interrupts = false;
439 
440 		if (info.device_type.Generation() >= 8) {
441 			interrupt_handler handler = &gen8_interrupt_handler;
442 			if (info.device_type.Generation() >= 11)
443 				handler = &gen11_interrupt_handler;
444 			status = install_io_interrupt_handler(info.irq,
445 				handler, (void*)&info, 0);
446 			if (status == B_OK) {
447 				gen8_enable_interrupts(info, INTEL_PIPE_A, true);
448 				gen8_enable_interrupts(info, INTEL_PIPE_B, true);
449 				if (info.device_type.Generation() >= 11)
450 					gen8_enable_interrupts(info, INTEL_PIPE_C, true);
451 				gen8_enable_global_interrupts(info, true);
452 
453 				if (info.device_type.Generation() >= 11) {
454 					uint32 mask = GEN8_AUX_CHANNEL_A;
455 					mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D;
456 					mask |= CNL_AUX_CHANNEL_F;
457 					mask |= ICL_AUX_CHANNEL_E;
458 					read32(info, GEN8_DE_PORT_ISR);
459 					write32(info, GEN8_DE_PORT_IER, mask);
460 					write32(info, GEN8_DE_PORT_IMR, ~mask);
461 					read32(info, GEN8_DE_PORT_IMR);
462 
463 					read32(info, GEN8_DE_MISC_ISR);
464 					write32(info, GEN8_DE_MISC_IER, GEN8_DE_EDP_PSR);
465 					write32(info, GEN8_DE_MISC_IMR, ~GEN8_DE_EDP_PSR);
466 					read32(info, GEN8_DE_MISC_IMR);
467 
468 					read32(info, GEN11_GU_MISC_IIR);
469 					write32(info, GEN11_GU_MISC_IER, GEN11_GU_MISC_GSE);
470 					write32(info, GEN11_GU_MISC_IMR, ~GEN11_GU_MISC_GSE);
471 					read32(info, GEN11_GU_MISC_IMR);
472 					// Missing: Hotplug
473 					gen11_enable_global_interrupts(info, true);
474 				}
475 			}
476 		} else {
477 			status = install_io_interrupt_handler(info.irq,
478 				&intel_interrupt_handler, (void*)&info, 0);
479 			if (status == B_OK) {
480 				g35_clear_interrupt_status(info, INTEL_PIPE_A);
481 				g35_clear_interrupt_status(info, INTEL_PIPE_B);
482 
483 				pipes which;
484 				which.SetPipe(INTEL_PIPE_A);
485 				which.SetPipe(INTEL_PIPE_B);
486 				intel_enable_interrupts(info, which, true);
487 			}
488 		}
489 	}
490 	if (status < B_OK) {
491 		// There is no interrupt reserved for us, or we couldn't install our
492 		// interrupt handler; fall back to faking the vblank interrupt for our
493 		// clients using a timer interrupt.
494 		info.fake_interrupts = true;
495 
496 		// TODO: fake interrupts!
497 		ERROR("Fake interrupt mode (no PCI interrupt line assigned\n");
498 		status = B_ERROR;
499 	}
500 
501 	if (status < B_OK) {
502 		delete_sem(info.shared_info->vblank_sem);
503 		info.shared_info->vblank_sem = B_ERROR;
504 	}
505 }
506 
507 
508 //	#pragma mark -
509 
510 
511 status_t
512 intel_free_memory(intel_info &info, addr_t base)
513 {
514 	return gGART->free_memory(info.aperture, base);
515 }
516 
517 
518 status_t
519 intel_allocate_memory(intel_info &info, size_t size, size_t alignment,
520 	uint32 flags, addr_t* _base, phys_addr_t* _physicalBase)
521 {
522 	return gGART->allocate_memory(info.aperture, size, alignment,
523 		flags, _base, _physicalBase);
524 }
525 
526 
527 status_t
528 intel_extreme_init(intel_info &info)
529 {
530 	CALLED();
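	// Map the GART aperture first; the ring buffer, overlay registers, cursor
	// and status page below are all allocated out of it.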
531 	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
532 		info.pci->function, 0, &info.aperture_base);
533 	if (info.aperture < B_OK) {
534 		ERROR("error: could not map GART aperture! (%s)\n",
535 			strerror(info.aperture));
536 		return info.aperture;
537 	}
538 
539 	AreaKeeper sharedCreator;
540 	info.shared_area = sharedCreator.Create("intel extreme shared info",
541 		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
542 		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
543 		B_FULL_LOCK,
544 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA);
545 	if (info.shared_area < B_OK) {
546 		ERROR("error: could not create shared area!\n");
547 		gGART->unmap_aperture(info.aperture);
548 		return info.shared_area;
549 	}
550 
551 	// enable power
552 	gPCI->set_powerstate(info.pci->bus, info.pci->device, info.pci->function,
553 		PCI_pm_state_d0);
554 
555 	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));
556 
557 	int mmioIndex = 1;
558 	if (info.device_type.Generation() >= 3) {
559 		// For some reason Intel saw the need to change the order of the
560 		// mappings with the introduction of the i9xx family
561 		mmioIndex = 0;
562 	}
563 
564 	// evaluate driver settings, if any
565 
566 	bool hardwareCursor;
567 	read_settings(hardwareCursor);
568 
569 	// memory mapped I/O
570 
571 	// TODO: registers are mapped twice (by us and intel_gart), maybe we
572 	// can share it between the drivers
573 
574 	phys_addr_t addr = info.pci->u.h0.base_registers[mmioIndex];
575 	uint64 barSize = info.pci->u.h0.base_register_sizes[mmioIndex];
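	// A 64-bit BAR spans two consecutive base registers; fold the upper
	// halves into the address and size before mapping.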
576 	if ((info.pci->u.h0.base_register_flags[mmioIndex] & PCI_address_type) == PCI_address_type_64) {
577 		addr |= (uint64)info.pci->u.h0.base_registers[mmioIndex + 1] << 32;
578 		barSize |= (uint64)info.pci->u.h0.base_register_sizes[mmioIndex + 1] << 32;
579 	}
580 	AreaKeeper mmioMapper;
581 	info.registers_area = mmioMapper.Map("intel extreme mmio", addr, barSize,
582 		B_ANY_KERNEL_ADDRESS,
583 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA,
584 		(void**)&info.registers);
585 	if (mmioMapper.InitCheck() < B_OK) {
586 		ERROR("error: could not map memory I/O!\n");
587 		gGART->unmap_aperture(info.aperture);
588 		return info.registers_area;
589 	}
590 
591 	bool hasPCH = (info.pch_info != INTEL_PCH_NONE);
592 
593 	ERROR("Init Intel generation %d GPU %s PCH split.\n",
594 		info.device_type.Generation(), hasPCH ? "with" : "without");
595 
596 	uint32* blocks = info.shared_info->register_blocks;
597 	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;
598 
599 	// setup the register blocks for the different architectures
600 	if (hasPCH) {
601 		// PCH based platforms (IronLake through ultra-low-power Broadwells)
602 		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
603 			= PCH_NORTH_SHARED_REGISTER_BASE;
604 		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
605 			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
606 		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
607 			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
608 		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
609 			= PCH_SOUTH_SHARED_REGISTER_BASE;
610 		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
611 			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
612 	} else {
613 		// (G)MCH/ICH based platforms
614 		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
615 			= MCH_SHARED_REGISTER_BASE;
616 		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
617 			= MCH_PIPE_AND_PORT_REGISTER_BASE;
618 		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
619 			= MCH_PLANE_CONTROL_REGISTER_BASE;
620 		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
621 			= ICH_SHARED_REGISTER_BASE;
622 		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
623 			= ICH_PORT_REGISTER_BASE;
624 	}
625 
626 	// Everything in the display PRM gets +0x180000
627 	if (info.device_type.InGroup(INTEL_GROUP_VLV)) {
628 		// "I nearly got violent with the hw guys when they told me..."
629 		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] += VLV_DISPLAY_BASE;
630 		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] += VLV_DISPLAY_BASE;
631 	}
632 
633 	TRACE("REGS_NORTH_SHARED: 0x%" B_PRIx32 "\n",
634 		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]);
635 	TRACE("REGS_NORTH_PIPE_AND_PORT: 0x%" B_PRIx32 "\n",
636 		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]);
637 	TRACE("REGS_NORTH_PLANE_CONTROL: 0x%" B_PRIx32 "\n",
638 		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]);
639 	TRACE("REGS_SOUTH_SHARED: 0x%" B_PRIx32 "\n",
640 		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]);
641 	TRACE("REGS_SOUTH_TRANSCODER_PORT: 0x%" B_PRIx32 "\n",
642 		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]);
643 
644 	// make sure bus master, memory-mapped I/O, and frame buffer are enabled
645 	set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci,
646 		PCI_command, 2) | PCI_command_io | PCI_command_memory
647 		| PCI_command_master);
648 
649 	// reserve ring buffer memory (currently this memory is placed in
650 	// graphics memory); this could cause problems with write
651 	// combining...
652 
653 	ring_buffer &primary = info.shared_info->primary_ring_buffer;
654 	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
655 			(addr_t*)&primary.base) == B_OK) {
656 		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
657 		primary.size = 16 * B_PAGE_SIZE;
658 		primary.offset = (addr_t)primary.base - info.aperture_base;
659 	}
660 
661 	// Enable clock gating
662 	intel_en_gating(info);
663 
664 	// Enable automatic gpu downclocking if we can to save power
665 	intel_en_downclock(info);
666 
667 	// no errors, so keep areas and mappings
668 	sharedCreator.Detach();
669 	mmioMapper.Detach();
670 
671 	aperture_info apertureInfo;
672 	gGART->get_aperture_info(info.aperture, &apertureInfo);
673 
674 	info.shared_info->registers_area = info.registers_area;
675 	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
676 	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
677 	info.shared_info->graphics_memory_size = apertureInfo.size;
678 	info.shared_info->frame_buffer = 0;
679 	info.shared_info->dpms_mode = B_DPMS_ON;
680 	info.shared_info->min_brightness = 2;
681 
682 	// Pull VBIOS panel mode for later use
683 	info.shared_info->got_vbt = get_lvds_mode_from_bios(
684 		&info.shared_info->panel_timing, &info.shared_info->min_brightness);
685 
686 	/* at least the 855GM can't drive more than one head at a time */
687 	if (info.device_type.InFamily(INTEL_FAMILY_8xx))
688 		info.shared_info->single_head_locked = 1;
689 
690 	if (info.device_type.InFamily(INTEL_FAMILY_SER5)) {
691 		info.shared_info->pll_info.reference_frequency = 120000;// 120 MHz
692 		info.shared_info->pll_info.max_frequency = 350000;
693 			// 350 MHz RAM DAC speed
694 		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
695 	} else if (info.device_type.InFamily(INTEL_FAMILY_9xx)) {
696 		info.shared_info->pll_info.reference_frequency = 96000;	// 96 MHz
697 		info.shared_info->pll_info.max_frequency = 400000;
698 			// 400 MHz RAM DAC speed
699 		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
700 	} else if (info.device_type.HasDDI() && (info.device_type.Generation() <= 8)) {
701 		info.shared_info->pll_info.reference_frequency = 135000;// 135 MHz
702 		info.shared_info->pll_info.max_frequency = 350000;
703 			// 350 MHz RAM DAC speed
704 		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
705 	} else if ((info.device_type.Generation() >= 9) &&
706 				info.device_type.InGroup(INTEL_GROUP_SKY)) {
707 		info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
708 		info.shared_info->pll_info.max_frequency = 350000;
709 			// 350 MHz RAM DAC speed
710 		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
711 	} else if (info.device_type.Generation() >= 9) {
712 		uint32 refInfo =
713 			(read32(info, ICL_DSSM) & ICL_DSSM_REF_FREQ_MASK) >> ICL_DSSM_REF_FREQ_SHIFT;
714 		switch (refInfo) {
715 			case ICL_DSSM_24000:
716 				info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
717 				break;
718 			case ICL_DSSM_19200:
719 				info.shared_info->pll_info.reference_frequency = 19200;	// 19.2 MHz
720 				break;
721 			case ICL_DSSM_38400:
722 				info.shared_info->pll_info.reference_frequency = 38400;	// 38.4 MHz
723 				break;
724 			default:
725 				ERROR("error: unknown ref. freq. strap, using 24 MHz! %" B_PRIx32 "\n", refInfo);
726 				info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
727 				break;
728 		}
729 		info.shared_info->pll_info.max_frequency = 350000;
730 			// 350 MHz RAM DAC speed
731 		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
732 	} else {
733 		info.shared_info->pll_info.reference_frequency = 48000;	// 48 MHz
734 		info.shared_info->pll_info.max_frequency = 350000;
735 			// 350 MHz RAM DAC speed
736 		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
737 	}
738 
739 	info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0;
740 
741 	info.shared_info->pch_info = info.pch_info;
742 
743 	info.shared_info->device_type = info.device_type;
744 #ifdef __HAIKU__
745 	strlcpy(info.shared_info->device_identifier, info.device_identifier,
746 		sizeof(info.shared_info->device_identifier));
747 #else
748 	strcpy(info.shared_info->device_identifier, info.device_identifier);
749 #endif
750 
751 	// setup overlay registers
752 
753 	status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0,
754 		intel_uses_physical_overlay(*info.shared_info)
755 				? B_APERTURE_NEED_PHYSICAL : 0,
756 		(addr_t*)&info.overlay_registers,
757 		&info.shared_info->physical_overlay_registers);
758 	if (status == B_OK) {
759 		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
760 			- info.aperture_base;
761 		TRACE("Overlay registers mapped at 0x%" B_PRIx32 " = %p - %"
762 			B_PRIxADDR " (%" B_PRIxPHYSADDR ")\n",
763 			info.shared_info->overlay_offset, info.overlay_registers,
764 			info.aperture_base, info.shared_info->physical_overlay_registers);
765 		init_overlay_registers(info.overlay_registers);
766 	} else {
767 		ERROR("error: could not allocate overlay memory! %s\n",
768 			strerror(status));
769 	}
770 
771 	// Allocate hardware status page and the cursor memory
772 	TRACE("Allocating hardware status page");
773 
774 	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
775 			(addr_t*)info.shared_info->status_page,
776 			&info.shared_info->physical_status_page) == B_OK) {
777 		// TODO: set status page
778 	}
779 	if (hardwareCursor) {
780 		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
781 			(addr_t*)&info.shared_info->cursor_memory,
782 			&info.shared_info->physical_cursor_memory);
783 	}
784 
785 	edid1_info* edidInfo = (edid1_info*)get_boot_item(VESA_EDID_BOOT_INFO,
786 		NULL);
787 	if (edidInfo != NULL) {
788 		info.shared_info->has_vesa_edid_info = true;
789 		memcpy(&info.shared_info->vesa_edid_info, edidInfo, sizeof(edid1_info));
790 	}
791 
792 	init_interrupt_handler(info);
793 
794 	if (hasPCH) {
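		// Gen5 (IronLake) derives the FDI link frequency from the BIOS
		// programmed PLL feedback divider; later PCH platforms use a fixed
		// 2.7 GHz link.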
795 		if (info.device_type.Generation() == 5) {
796 			info.shared_info->fdi_link_frequency = (read32(info, FDI_PLL_BIOS_0)
797 				& FDI_PLL_FB_CLOCK_MASK) + 2;
798 			info.shared_info->fdi_link_frequency *= 100;
799 		} else {
800 			info.shared_info->fdi_link_frequency = 2700;
801 		}
802 	} else {
803 		info.shared_info->fdi_link_frequency = 0;
804 	}
805 
806 	TRACE("%s: completed successfully!\n", __func__);
807 	return B_OK;
808 }
809 
810 
811 void
812 intel_extreme_uninit(intel_info &info)
813 {
814 	CALLED();
815 
816 	if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) {
817 		// disable interrupt generation
818 		if (info.device_type.Generation() >= 8) {
819 			if (info.device_type.Generation() >= 11) {
820 				gen11_enable_global_interrupts(info, false);
821 			}
822 			gen8_enable_global_interrupts(info, false);
823 			interrupt_handler handler = &gen8_interrupt_handler;
824 			if (info.device_type.Generation() >= 11)
825 				handler = &gen11_interrupt_handler;
826 			remove_io_interrupt_handler(info.irq, handler, &info);
827 		} else {
828 			write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
829 			write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);
830 			remove_io_interrupt_handler(info.irq, intel_interrupt_handler, &info);
831 		}
832 
833 		if (info.use_msi && gPCIx86Module != NULL) {
834 			gPCIx86Module->disable_msi(info.pci->bus,
835 				info.pci->device, info.pci->function);
836 			gPCIx86Module->unconfigure_msi(info.pci->bus,
837 				info.pci->device, info.pci->function);
838 		}
839 	}
840 
841 	gGART->unmap_aperture(info.aperture);
842 
843 	delete_area(info.registers_area);
844 	delete_area(info.shared_area);
845 }
846 
847