xref: /haiku/src/add-ons/kernel/drivers/graphics/intel_extreme/intel_extreme.cpp (revision 4c8e85b316c35a9161f5a1c50ad70bc91c83a76f)
/*
 * Copyright 2006-2018, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 *		Alexander von Gluck IV, kallisti5@unixzen.com
 *		Adrien Destugues, pulkomandy@pulkomandy.tk
 */


#include "intel_extreme.h"

#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include <AreaKeeper.h>
#include <boot_item.h>
#include <driver_settings.h>
#include <util/kernel_cpp.h>

#include <vesa_info.h>

#include "driver.h"
#include "power.h"
#include "utility.h"


#define TRACE_INTELEXTREME
#ifdef TRACE_INTELEXTREME
#	define TRACE(x...) dprintf("intel_extreme: " x)
#else
#	define TRACE(x...) ;
#endif

#define ERROR(x...) dprintf("intel_extreme: " x)
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)


static void
init_overlay_registers(overlay_registers* _registers)
{
	user_memset(_registers, 0, B_PAGE_SIZE);

	overlay_registers registers;
	memset(&registers, 0, sizeof(registers));
	registers.contrast_correction = 0x48;
	registers.saturation_cos_correction = 0x9a;
		// this by-passes contrast and saturation correction

	user_memcpy(_registers, &registers, sizeof(overlay_registers));
}
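// A note on the copy pattern above: the defaults are prepared in a local stack
// copy and then pushed to the target page with user_memset()/user_memcpy(),
// which fail gracefully on a faulting address instead of taking the kernel
// down. The overlay register block itself is allocated out of the GTT
// aperture in intel_extreme_init() below.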


static void
read_settings(bool &hardwareCursor)
{
	hardwareCursor = false;

	void* settings = load_driver_settings("intel_extreme");
	if (settings != NULL) {
		hardwareCursor = get_driver_boolean_parameter(settings,
			"hardware_cursor", true, true);

		unload_driver_settings(settings);
	}
}
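// For illustration only: a minimal "intel_extreme" driver settings file (the
// name load_driver_settings() looks up above, conventionally placed under
// ~/config/settings/kernel/drivers/) that disables the hardware cursor would
// contain a single line:
//
//	hardware_cursor false
//
// Without a settings file, hardwareCursor keeps the "false" default set above.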


static int32
release_vblank_sem(intel_info &info)
{
	int32 count;
	if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK
		&& count < 0) {
		release_sem_etc(info.shared_info->vblank_sem, -count,
			B_DO_NOT_RESCHEDULE);
		return B_INVOKE_SCHEDULER;
	}

	return B_HANDLED_INTERRUPT;
}
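// A negative semaphore count means that many threads are currently waiting on
// vblank_sem, so releasing by -count wakes each of them exactly once per
// retrace. B_DO_NOT_RESCHEDULE keeps the scheduler out of interrupt context;
// returning B_INVOKE_SCHEDULER instead requests a reschedule once the
// interrupt handler has finished.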


/** Get the appropriate interrupt mask for enabling or testing interrupts on
 * the given pipes.
 *
 * The bits to test or set are different depending on the hardware generation.
 *
 * \param info Intel_extreme driver information
 * \param pipes bit mask of the pipes to use
 * \param enable true to get the mask for enabling the interrupts, false to get
 *               the mask for testing them.
 */
static uint32
intel_get_interrupt_mask(intel_info& info, int pipes, bool enable)
{
	uint32 mask = 0;
	bool hasPCH = info.pch_info != INTEL_PCH_NONE;

	// Intel changed the PCH register mapping between Sandy Bridge and the
	// later generations (Ivy Bridge and up).
	// The PCH register itself does not exist in pre-PCH platforms, and the
	// previous interrupt register of course also had a different mapping.

	if ((pipes & INTEL_PIPE_A) != 0) {
		if (info.device_type.InGroup(INTEL_GROUP_SNB)
				|| info.device_type.InGroup(INTEL_GROUP_ILK))
			mask |= PCH_INTERRUPT_VBLANK_PIPEA_SNB;
		else if (hasPCH)
			mask |= PCH_INTERRUPT_VBLANK_PIPEA;
		else
			mask |= INTERRUPT_VBLANK_PIPEA;
	}

	if ((pipes & INTEL_PIPE_B) != 0) {
		if (info.device_type.InGroup(INTEL_GROUP_SNB)
				|| info.device_type.InGroup(INTEL_GROUP_ILK))
			mask |= PCH_INTERRUPT_VBLANK_PIPEB_SNB;
		else if (hasPCH)
			mask |= PCH_INTERRUPT_VBLANK_PIPEB;
		else
			mask |= INTERRUPT_VBLANK_PIPEB;
	}

#if 0 // FIXME enable when we support the 3rd pipe
	if ((pipes & INTEL_PIPE_C) != 0) {
		// Older generations only had two pipes
		if (hasPCH && info.device_type.Generation() > 6)
			mask |= PCH_INTERRUPT_VBLANK_PIPEC;
	}
#endif

	// On SandyBridge, there is an extra "global enable" flag, which must also
	// be set when enabling the interrupts (but not when testing for them).
	if (enable && info.device_type.InFamily(INTEL_FAMILY_SER5))
		mask |= PCH_INTERRUPT_GLOBAL_SNB;

	return mask;
}
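// The helper is used both ways below: with enable == false the returned mask
// is compared against the interrupt identity register in the interrupt
// handler, and with enable == true it is written to INTEL_INTERRUPT_ENABLED
// (and its complement to INTEL_INTERRUPT_MASK) in init_interrupt_handler(),
// which is when the SandyBridge "global enable" bit is added.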


static int32
intel_interrupt_handler(void* data)
{
	intel_info &info = *(intel_info*)data;
	uint32 reg = find_reg(info, INTEL_INTERRUPT_IDENTITY);
	uint32 identity = read32(info, reg);

	if (identity == 0)
		return B_UNHANDLED_INTERRUPT;

	int32 handled = B_HANDLED_INTERRUPT;

	while (identity != 0) {
		uint32 mask = intel_get_interrupt_mask(info, INTEL_PIPE_A, false);

		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}

		mask = intel_get_interrupt_mask(info, INTEL_PIPE_B, false);
		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}

#if 0
		// FIXME we don't have support for the 3rd pipe yet
		mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEC
			: 0;
		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_C_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}
#endif

		// setting the bit clears it!
		write32(info, reg, identity);
		// update our identity register with the remaining interrupts
		identity = read32(info, reg);
	}

	return handled;
}
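// The identity register is "write one to clear": writing back the bits we just
// handled acknowledges them, and re-reading the register lets the loop service
// any interrupt that was raised while the handler was running before it
// returns.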


static void
init_interrupt_handler(intel_info &info)
{
	info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank");
	if (info.shared_info->vblank_sem < B_OK)
		return;

	status_t status = B_OK;

	// We need to change the owner of the sem to the calling team (usually the
	// app_server), because userland apps cannot acquire kernel semaphores
	thread_id thread = find_thread(NULL);
	thread_info threadInfo;
	if (get_thread_info(thread, &threadInfo) != B_OK
		|| set_sem_owner(info.shared_info->vblank_sem, threadInfo.team)
			!= B_OK) {
		status = B_ERROR;
	}

	// Find the right interrupt vector, using MSIs if available.
	info.irq = 0xff;
	info.use_msi = false;
	if (info.pci->u.h0.interrupt_pin != 0x00)
		info.irq = info.pci->u.h0.interrupt_line;
	if (gPCIx86Module != NULL && gPCIx86Module->get_msi_count(info.pci->bus,
			info.pci->device, info.pci->function) >= 1) {
		uint8 msiVector = 0;
		if (gPCIx86Module->configure_msi(info.pci->bus, info.pci->device,
				info.pci->function, 1, &msiVector) == B_OK
			&& gPCIx86Module->enable_msi(info.pci->bus, info.pci->device,
				info.pci->function) == B_OK) {
			TRACE("using message signaled interrupts\n");
			info.irq = msiVector;
			info.use_msi = true;
		}
	}

	if (status == B_OK && info.irq != 0xff) {
		// we have an interrupt line to use

		info.fake_interrupts = false;

		status = install_io_interrupt_handler(info.irq,
			&intel_interrupt_handler, (void*)&info, 0);
		if (status == B_OK) {
			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);

			uint32 enable = intel_get_interrupt_mask(info,
				INTEL_PIPE_A | INTEL_PIPE_B, true);

			// Clear all the interrupts
			write32(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);

			// enable interrupts - we only want VBLANK interrupts
			write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), enable);
			write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~enable);
		}
	}
	if (status < B_OK) {
		// There is no interrupt reserved for us, or we couldn't install our
		// interrupt handler; let's fake the vblank interrupt for our clients
		// using a timer interrupt.
		info.fake_interrupts = true;

		// TODO: fake interrupts!
		ERROR("Fake interrupt mode (no PCI interrupt line assigned)\n");
		status = B_ERROR;
	}

	if (status < B_OK) {
		delete_sem(info.shared_info->vblank_sem);
		info.shared_info->vblank_sem = B_ERROR;
	}
}
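// The semaphore ownership change above is what allows a userland client (the
// accelerant loaded by the app_server) to block on vblank_sem directly. As a
// rough sketch only (not the actual accelerant code), a retrace wait on the
// other side could look like this, with gInfo standing in for the accelerant's
// view of the shared info:
//
//	status_t
//	intel_wait_for_retrace(bigtime_t timeout)
//	{
//		// released once per vblank interrupt, see release_vblank_sem()
//		return acquire_sem_etc(gInfo->shared_info->vblank_sem, 1,
//			B_RELATIVE_TIMEOUT, timeout);
//	}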


//	#pragma mark -


status_t
intel_free_memory(intel_info &info, addr_t base)
{
	return gGART->free_memory(info.aperture, base);
}


status_t
intel_allocate_memory(intel_info &info, size_t size, size_t alignment,
	uint32 flags, addr_t* _base, phys_addr_t* _physicalBase)
{
	return gGART->allocate_memory(info.aperture, size, alignment,
		flags, _base, _physicalBase);
}
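// Both helpers are thin wrappers around the intel_gart bus manager: memory is
// carved out of the GTT aperture mapped in intel_extreme_init() below, the
// returned base being an address inside that aperture mapping. Passing
// B_APERTURE_NEED_PHYSICAL additionally reports a physical address, as done
// for the overlay, status page and cursor allocations further down.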


status_t
intel_extreme_init(intel_info &info)
{
	CALLED();
	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
		info.pci->function, 0, &info.aperture_base);
	if (info.aperture < B_OK) {
		ERROR("error: could not map GART aperture! (%s)\n",
			strerror(info.aperture));
		return info.aperture;
	}

	AreaKeeper sharedCreator;
	info.shared_area = sharedCreator.Create("intel extreme shared info",
		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
		B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA);
	if (info.shared_area < B_OK) {
		ERROR("error: could not create shared area!\n");
		gGART->unmap_aperture(info.aperture);
		return info.shared_area;
	}

	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));

	int mmioIndex = 1;
	if (info.device_type.Generation() >= 3) {
		// For some reason Intel saw the need to change the order of the
		// mappings with the introduction of the i9xx family
		mmioIndex = 0;
	}
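	// The index refers to the PCI base address registers: on the i8xx
	// generations BAR 0 is the graphics aperture and BAR 1 the MMIO space,
	// while from the i9xx family on the MMIO registers are found in BAR 0.
	// The chosen BAR is handed to mmioMapper.Map() below.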

	// evaluate driver settings, if any

	bool hardwareCursor;
	read_settings(hardwareCursor);

	// memory mapped I/O

	// TODO: registers are mapped twice (by us and intel_gart); maybe we
	// can share the mapping between the drivers

	AreaKeeper mmioMapper;
	info.registers_area = mmioMapper.Map("intel extreme mmio",
		info.pci->u.h0.base_registers[mmioIndex],
		info.pci->u.h0.base_register_sizes[mmioIndex],
		B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA,
		(void**)&info.registers);
	if (mmioMapper.InitCheck() < B_OK) {
		ERROR("error: could not map memory I/O!\n");
		gGART->unmap_aperture(info.aperture);
		return info.registers_area;
	}

	bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

	ERROR("Init Intel generation %d GPU %s PCH split.\n",
		info.device_type.Generation(), hasPCH ? "with" : "without");

	uint32* blocks = info.shared_info->register_blocks;
	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;

	// setup the register blocks for the different architectures
	if (hasPCH) {
		// PCH based platforms (IronLake through ultra-low-power Broadwells)
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= PCH_NORTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= PCH_SOUTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
	} else {
		// (G)MCH/ICH based platforms
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= MCH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= MCH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= MCH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= ICH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= ICH_PORT_REGISTER_BASE;
	}

	// Everything in the display PRM gets +0x180000
	if (info.device_type.InGroup(INTEL_GROUP_VLV)) {
		// "I nearly got violent with the hw guys when they told me..."
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] += VLV_DISPLAY_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] += VLV_DISPLAY_BASE;
	}

	TRACE("REGS_NORTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]);
	TRACE("REGS_NORTH_PIPE_AND_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]);
	TRACE("REGS_NORTH_PLANE_CONTROL: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]);
	TRACE("REGS_SOUTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]);
	TRACE("REGS_SOUTH_TRANSCODER_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]);
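	// From here on, register definitions in intel_extreme.h carry the block
	// they belong to, and find_reg()/read32()/write32() presumably combine the
	// block base stored above with the register's offset; that way the same
	// INTEL_* constant resolves correctly on both the PCH and the (G)MCH/ICH
	// register layouts.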

	// make sure bus master, memory-mapped I/O, and frame buffer are enabled
	set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci,
		PCI_command, 2) | PCI_command_io | PCI_command_memory
		| PCI_command_master);

	// reserve ring buffer memory (currently, this memory is placed in
	// the graphics memory), although this could cause problems with
	// write combining...

	ring_buffer &primary = info.shared_info->primary_ring_buffer;
	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
			(addr_t*)&primary.base) == B_OK) {
		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
		primary.size = 16 * B_PAGE_SIZE;
		primary.offset = (addr_t)primary.base - info.aperture_base;
	}
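	// primary.base is the CPU-side address of the ring buffer inside the
	// aperture mapping, while primary.offset is its position relative to the
	// aperture start, i.e. the GPU-visible address that would be used when
	// programming the ring via INTEL_PRIMARY_RING_BUFFER.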

	// Enable clock gating
	intel_en_gating(info);

	// Enable automatic GPU downclocking, where supported, to save power
	intel_en_downclock(info);

	// no errors, so keep areas and mappings
	sharedCreator.Detach();
	mmioMapper.Detach();

	aperture_info apertureInfo;
	gGART->get_aperture_info(info.aperture, &apertureInfo);

	info.shared_info->registers_area = info.registers_area;
	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
	info.shared_info->graphics_memory_size = apertureInfo.size;
	info.shared_info->frame_buffer = 0;
	info.shared_info->dpms_mode = B_DPMS_ON;

	// Pull VBIOS panel mode for later use
	info.shared_info->got_vbt = get_lvds_mode_from_bios(
		&info.shared_info->panel_timing);

	/* at least 855gm can't drive more than one head at a time */
	if (info.device_type.InFamily(INTEL_FAMILY_8xx))
		info.shared_info->single_head_locked = 1;

	if (info.device_type.InFamily(INTEL_FAMILY_SER5)) {
		info.shared_info->pll_info.reference_frequency = 120000;	// 120 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else if (info.device_type.InFamily(INTEL_FAMILY_9xx)) {
		info.shared_info->pll_info.reference_frequency = 96000;	// 96 MHz
		info.shared_info->pll_info.max_frequency = 400000;
			// 400 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else if (info.device_type.HasDDI()
			&& info.device_type.Generation() <= 8) {
		info.shared_info->pll_info.reference_frequency = 135000;	// 135 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	} else if (info.device_type.Generation() == 9
			&& info.device_type.InGroup(INTEL_GROUP_SKY)) {
		info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	} else if (info.device_type.Generation() == 9) {
		uint32 refInfo = (read32(info, ICL_DSSM) & ICL_DSSM_REF_FREQ_MASK)
			>> ICL_DSSM_REF_FREQ_SHIFT;
		switch (refInfo) {
			case ICL_DSSM_24000:
				info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
				break;
			case ICL_DSSM_19200:
				info.shared_info->pll_info.reference_frequency = 19200;	// 19.2 MHz
				break;
			case ICL_DSSM_38400:
				info.shared_info->pll_info.reference_frequency = 38400;	// 38.4 MHz
				break;
			default:
				ERROR("error: unknown reference frequency strap, using"
					" 24 MHz! %" B_PRIx32 "\n", refInfo);
				info.shared_info->pll_info.reference_frequency = 24000;	// 24 MHz
				break;
		}
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	} else {
		info.shared_info->pll_info.reference_frequency = 48000;	// 48 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	}

	info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0;

	info.shared_info->pch_info = info.pch_info;

	info.shared_info->device_type = info.device_type;
#ifdef __HAIKU__
	strlcpy(info.shared_info->device_identifier, info.device_identifier,
		sizeof(info.shared_info->device_identifier));
#else
	strcpy(info.shared_info->device_identifier, info.device_identifier);
#endif

	// setup overlay registers

	status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0,
		intel_uses_physical_overlay(*info.shared_info)
				? B_APERTURE_NEED_PHYSICAL : 0,
		(addr_t*)&info.overlay_registers,
		&info.shared_info->physical_overlay_registers);
	if (status == B_OK) {
		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
			- info.aperture_base;
		TRACE("Overlay registers mapped at 0x%" B_PRIx32 " = %p - %"
			B_PRIxADDR " (%" B_PRIxPHYSADDR ")\n",
			info.shared_info->overlay_offset, info.overlay_registers,
			info.aperture_base, info.shared_info->physical_overlay_registers);
		init_overlay_registers(info.overlay_registers);
	} else {
		ERROR("error: could not allocate overlay memory! %s\n",
			strerror(status));
	}

	// Allocate hardware status page and the cursor memory
	TRACE("Allocating hardware status page\n");

	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)info.shared_info->status_page,
			&info.shared_info->physical_status_page) == B_OK) {
		// TODO: set status page
	}
	if (hardwareCursor) {
		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->cursor_memory,
			&info.shared_info->physical_cursor_memory);
	}

	edid1_info* edidInfo = (edid1_info*)get_boot_item(VESA_EDID_BOOT_INFO,
		NULL);
	if (edidInfo != NULL) {
		info.shared_info->has_vesa_edid_info = true;
		memcpy(&info.shared_info->vesa_edid_info, edidInfo, sizeof(edid1_info));
	}
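	// The VESA_EDID_BOOT_INFO boot item carries the EDID block that the boot
	// loader obtained through the VESA BIOS; keeping a copy in shared_info
	// presumably gives the accelerant a fallback for displays whose EDID
	// cannot be read over DDC directly.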

	init_interrupt_handler(info);

	if (hasPCH) {
		if (info.device_type.Generation() == 5) {
			info.shared_info->fdi_link_frequency = (read32(info, FDI_PLL_BIOS_0)
				& FDI_PLL_FB_CLOCK_MASK) + 2;
			info.shared_info->fdi_link_frequency *= 100;
		} else {
			info.shared_info->fdi_link_frequency = 2700;
		}
	} else {
		info.shared_info->fdi_link_frequency = 0;
	}
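	// fdi_link_frequency is kept in MHz: on Ironlake (generation 5) it is
	// derived from the FDI PLL feedback divider left behind by the BIOS as
	// (divider + 2) * 100 MHz, later PCH platforms use a fixed 2.7 GHz FDI
	// link, and non-PCH hardware has no FDI link at all, hence 0.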

	TRACE("%s: completed successfully!\n", __func__);
	return B_OK;
}


void
intel_extreme_uninit(intel_info &info)
{
	CALLED();

	if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) {
		// disable interrupt generation
		write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
		write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);

		remove_io_interrupt_handler(info.irq, intel_interrupt_handler, &info);

		if (info.use_msi && gPCIx86Module != NULL) {
			gPCIx86Module->disable_msi(info.pci->bus,
				info.pci->device, info.pci->function);
			gPCIx86Module->unconfigure_msi(info.pci->bus,
				info.pci->device, info.pci->function);
		}
	}

	gGART->unmap_aperture(info.aperture);

	delete_area(info.registers_area);
	delete_area(info.shared_area);
}