xref: /haiku/src/add-ons/kernel/drivers/graphics/intel_extreme/intel_extreme.cpp (revision 1e60bdeab63fa7a57bc9a55b032052e95a18bd2c)
/*
 * Copyright 2006-2018, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 *		Alexander von Gluck IV, kallisti5@unixzen.com
 *		Adrien Destugues, pulkomandy@pulkomandy.tk
 */


#include "intel_extreme.h"

#include "AreaKeeper.h"
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include <boot_item.h>
#include <driver_settings.h>
#include <util/kernel_cpp.h>

#include <vesa_info.h>

#include "driver.h"
#include "power.h"
#include "utility.h"


#define TRACE_INTELEXTREME
#ifdef TRACE_INTELEXTREME
#	define TRACE(x...) dprintf("intel_extreme: " x)
#else
#	define TRACE(x...) ;
#endif

#define ERROR(x...) dprintf("intel_extreme: " x)
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)


static void
init_overlay_registers(overlay_registers* _registers)
{
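	// The register block is written through user_memset()/user_memcpy(),
	// presumably because it lives in GTT-backed graphics memory that may also
	// be mapped for userland clients: clear the whole page first, then build
	// the defaults locally and copy them over in one go.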
	user_memset(_registers, 0, B_PAGE_SIZE);

	overlay_registers registers;
	memset(&registers, 0, sizeof(registers));
	registers.contrast_correction = 0x48;
	registers.saturation_cos_correction = 0x9a;
		// this by-passes contrast and saturation correction

	user_memcpy(_registers, &registers, sizeof(overlay_registers));
}


static void
read_settings(bool &hardwareCursor)
{
	hardwareCursor = false;

	void* settings = load_driver_settings("intel_extreme");
	if (settings != NULL) {
		hardwareCursor = get_driver_boolean_parameter(settings,
			"hardware_cursor", true, true);

		unload_driver_settings(settings);
	}
}


static int32
release_vblank_sem(intel_info &info)
{
	int32 count;
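	// Only release the semaphore when clients are actually waiting on it
	// (negative count); otherwise its count would grow without bound between
	// retraces.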
	if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK
		&& count < 0) {
		release_sem_etc(info.shared_info->vblank_sem, -count,
			B_DO_NOT_RESCHEDULE);
		return B_INVOKE_SCHEDULER;
	}

	return B_HANDLED_INTERRUPT;
}


/** Get the appropriate interrupt mask for enabling or testing interrupts on
 * the given pipes.
 *
 * The bits to test or set are different depending on the hardware generation.
 *
 * \param info Intel_extreme driver information
 * \param pipes bit mask of the pipes to use
 * \param enable true to get the mask for enabling the interrupts, false to get
 *               the mask for testing them.
 */
static uint32
intel_get_interrupt_mask(intel_info& info, int pipes, bool enable)
{
	uint32 mask = 0;
	bool hasPCH = info.pch_info != INTEL_PCH_NONE;

	// Intel changed the PCH register mapping between Sandy Bridge and the
	// later generations (Ivy Bridge and up).

	if ((pipes & INTEL_PIPE_A) != 0) {
		if (info.device_type.InGroup(INTEL_GROUP_SNB))
			mask |= PCH_INTERRUPT_VBLANK_PIPEA_SNB;
		else if (hasPCH)
			mask |= PCH_INTERRUPT_VBLANK_PIPEA;
		else
			mask |= INTERRUPT_VBLANK_PIPEA;
	}

	if ((pipes & INTEL_PIPE_B) != 0) {
		if (info.device_type.InGroup(INTEL_GROUP_SNB))
			mask |= PCH_INTERRUPT_VBLANK_PIPEB_SNB;
		else if (hasPCH)
			mask |= PCH_INTERRUPT_VBLANK_PIPEB;
		else
			mask |= INTERRUPT_VBLANK_PIPEB;
	}

	// On SandyBridge, there is an extra "global enable" flag, which must also
	// be set when enabling the interrupts (but not when testing for them).
	if (enable && info.device_type.InGroup(INTEL_GROUP_SNB))
		mask |= PCH_INTERRUPT_GLOBAL_SNB;

	return mask;
}


static int32
intel_interrupt_handler(void* data)
{
	intel_info &info = *(intel_info*)data;
	uint32 reg = find_reg(info, INTEL_INTERRUPT_IDENTITY);
	bool hasPCH = (info.pch_info != INTEL_PCH_NONE);
	uint32 identity;

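	// The interrupt identity register is accessed as a 32-bit register on
	// PCH platforms, but only as a 16-bit one on the older chipsets.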
	if (hasPCH)
		identity = read32(info, reg);
	else
		identity = read16(info, reg);

	if (identity == 0)
		return B_UNHANDLED_INTERRUPT;

	int32 handled = B_HANDLED_INTERRUPT;

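	// Process and acknowledge events until the identity register reads back
	// as zero; new interrupts may be flagged while we are still handling the
	// previous ones.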
	while (identity != 0) {

		uint32 mask = intel_get_interrupt_mask(info, INTEL_PIPE_A, false);

		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}

		mask = intel_get_interrupt_mask(info, INTEL_PIPE_B, false);
		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}

#if 0
		// FIXME we don't have support for the 3rd pipe yet
		mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEC
			: 0;
		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_C_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}
#endif

		// setting the bit clears it!
		if (hasPCH) {
			write32(info, reg, identity);
			identity = read32(info, reg);
		} else {
			write16(info, reg, identity);
			identity = read16(info, reg);
		}
	}

	return handled;
}


static void
init_interrupt_handler(intel_info &info)
{
	info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank");
	if (info.shared_info->vblank_sem < B_OK)
		return;

	status_t status = B_OK;

	// We need to change the owner of the sem to the calling team (usually the
	// app_server), because userland apps cannot acquire kernel semaphores
	thread_id thread = find_thread(NULL);
	thread_info threadInfo;
	if (get_thread_info(thread, &threadInfo) != B_OK
		|| set_sem_owner(info.shared_info->vblank_sem, threadInfo.team)
			!= B_OK) {
		status = B_ERROR;
	}

	// Find the right interrupt vector, using MSIs if available.
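	// (0xff is used below as a sentinel for "no usable interrupt line".)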
	info.irq = 0xff;
	info.use_msi = false;
	if (info.pci->u.h0.interrupt_pin != 0x00)
		info.irq = info.pci->u.h0.interrupt_line;
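	// Prefer message-signaled interrupts when both the device and the PCI
	// module support them; otherwise stick with the legacy interrupt line.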
	if (gPCIx86Module != NULL && gPCIx86Module->get_msi_count(info.pci->bus,
			info.pci->device, info.pci->function) >= 1) {
		uint8 msiVector = 0;
		if (gPCIx86Module->configure_msi(info.pci->bus, info.pci->device,
				info.pci->function, 1, &msiVector) == B_OK
			&& gPCIx86Module->enable_msi(info.pci->bus, info.pci->device,
				info.pci->function) == B_OK) {
			ERROR("using message signaled interrupts\n");
			info.irq = msiVector;
			info.use_msi = true;
		}
	}

	if (status == B_OK && info.irq != 0xff) {
		// we've gotten an interrupt line for us to use

		info.fake_interrupts = false;

		status = install_io_interrupt_handler(info.irq,
			&intel_interrupt_handler, (void*)&info, 0);
		if (status == B_OK) {
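			// Writing the status bit acknowledges any pending VBLANK event
			// and the enable bit arms the next one; the interrupt handler
			// re-writes the same value after every event.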
			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);

			bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

			uint32 enable = intel_get_interrupt_mask(info,
				INTEL_PIPE_A | INTEL_PIPE_B, true);

			if (hasPCH) {
				// Clear all the interrupts
				write32(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);

				// enable interrupts - we only want VBLANK interrupts
				write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), enable);
				write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~enable);
			} else {
				// Clear all the interrupts
				write16(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);

				// enable interrupts - we only want VBLANK interrupts
				write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), enable);
				write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~enable);
			}
		}
	}
	if (status < B_OK) {
		// There is no interrupt reserved for us, or we couldn't install our
		// interrupt handler, let's fake the vblank interrupt for our clients
		// using a timer interrupt
		info.fake_interrupts = true;

		// TODO: fake interrupts!
		TRACE("Fake interrupt mode (no PCI interrupt line assigned)\n");
		status = B_ERROR;
	}

	if (status < B_OK) {
		delete_sem(info.shared_info->vblank_sem);
		info.shared_info->vblank_sem = B_ERROR;
	}
}


//	#pragma mark -


status_t
intel_free_memory(intel_info &info, addr_t base)
{
	return gGART->free_memory(info.aperture, base);
}


status_t
intel_allocate_memory(intel_info &info, size_t size, size_t alignment,
	uint32 flags, addr_t* _base, phys_addr_t* _physicalBase)
{
	return gGART->allocate_memory(info.aperture, size, alignment,
		flags, _base, _physicalBase);
}


status_t
intel_extreme_init(intel_info &info)
{
	CALLED();
	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
		info.pci->function, 0, &info.aperture_base);
	if (info.aperture < B_OK) {
		ERROR("error: could not map GART aperture! (%s)\n",
			strerror(info.aperture));
		return info.aperture;
	}

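	// The shared info area is cloned by the accelerant in userland, which is
	// why it is created user-cloneable here.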
	AreaKeeper sharedCreator;
	info.shared_area = sharedCreator.Create("intel extreme shared info",
		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
		B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_USER_CLONEABLE_AREA);
	if (info.shared_area < B_OK) {
		ERROR("error: could not create shared area!\n");
		gGART->unmap_aperture(info.aperture);
		return info.shared_area;
	}

	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));

	int mmioIndex = 1;
	if (info.device_type.Generation() >= 3) {
		// For some reason Intel saw the need to change the order of the
		// mappings with the introduction of the i9xx family
		mmioIndex = 0;
	}

	// evaluate driver settings, if any

	bool hardwareCursor;
	read_settings(hardwareCursor);

	// memory mapped I/O

	// TODO: registers are mapped twice (by us and intel_gart), maybe we
	// can share it between the drivers

	AreaKeeper mmioMapper;
	info.registers_area = mmioMapper.Map("intel extreme mmio",
		info.pci->u.h0.base_registers[mmioIndex],
		info.pci->u.h0.base_register_sizes[mmioIndex],
		B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_USER_CLONEABLE_AREA,
		(void**)&info.registers);
	if (mmioMapper.InitCheck() < B_OK) {
		ERROR("error: could not map memory I/O!\n");
		gGART->unmap_aperture(info.aperture);
		return info.registers_area;
	}

	bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

	ERROR("Init Intel generation %d GPU %s PCH split.\n",
		info.device_type.Generation(), hasPCH ? "with" : "without");

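	// Register offsets used throughout the driver encode a block index in
	// their upper bits; read32()/write32() resolve that index against this
	// table (see the REGISTER_BLOCK() machinery in intel_extreme.h), so the
	// same logical register name works across the different layouts set up
	// below.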
	uint32* blocks = info.shared_info->register_blocks;
	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;

	// setup the register blocks for the different architectures
	if (hasPCH) {
		// PCH based platforms (IronLake through ultra-low-power Broadwells)
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= PCH_NORTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= PCH_SOUTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
	} else {
		// (G)MCH/ICH based platforms
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= MCH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= MCH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= MCH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= ICH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= ICH_PORT_REGISTER_BASE;
	}

	// Everything in the display PRM gets +0x180000
	if (info.device_type.InGroup(INTEL_GROUP_VLV)) {
		// "I nearly got violent with the hw guys when they told me..."
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] += VLV_DISPLAY_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] += VLV_DISPLAY_BASE;
	}

	TRACE("REGS_NORTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]);
	TRACE("REGS_NORTH_PIPE_AND_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]);
	TRACE("REGS_NORTH_PLANE_CONTROL: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]);
	TRACE("REGS_SOUTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]);
	TRACE("REGS_SOUTH_TRANSCODER_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]);

	// make sure bus master, memory-mapped I/O, and frame buffer are enabled
	set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci,
		PCI_command, 2) | PCI_command_io | PCI_command_memory
		| PCI_command_master);

	// reserve ring buffer memory (currently, this memory is placed in
	// the graphics memory), but this could bring us problems with
	// write combining...

	ring_buffer &primary = info.shared_info->primary_ring_buffer;
	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
			(addr_t*)&primary.base) == B_OK) {
		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
		primary.size = 16 * B_PAGE_SIZE;
		primary.offset = (addr_t)primary.base - info.aperture_base;
	}

	// Enable clock gating
	intel_en_gating(info);

	// Enable automatic GPU downclocking, if supported, to save power
	intel_en_downclock(info);

	// no errors, so keep areas and mappings
	sharedCreator.Detach();
	mmioMapper.Detach();

	aperture_info apertureInfo;
	gGART->get_aperture_info(info.aperture, &apertureInfo);

	info.shared_info->registers_area = info.registers_area;
	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
	info.shared_info->graphics_memory_size = apertureInfo.size;
	info.shared_info->frame_buffer = 0;
	info.shared_info->dpms_mode = B_DPMS_ON;

	// Pull VBIOS panel mode for later use
	info.shared_info->got_vbt = get_lvds_mode_from_bios(
		&info.shared_info->panel_mode);

	/* at least the 855gm can't drive more than one head at a time */
	if (info.device_type.InFamily(INTEL_FAMILY_8xx))
		info.shared_info->single_head_locked = 1;

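	// Per-family PLL limits; these reference and (RAM)DAC frequency bounds
	// are used later when computing PLL divisors for mode setting.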
	if (info.device_type.InFamily(INTEL_FAMILY_SER5)) {
		info.shared_info->pll_info.reference_frequency = 120000;	// 120 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else if (info.device_type.InFamily(INTEL_FAMILY_9xx)) {
		info.shared_info->pll_info.reference_frequency = 96000;	// 96 MHz
		info.shared_info->pll_info.max_frequency = 400000;
			// 400 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else {
		info.shared_info->pll_info.reference_frequency = 48000;	// 48 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	}

	info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0;

	info.shared_info->pch_info = info.pch_info;

	info.shared_info->device_type = info.device_type;
#ifdef __HAIKU__
	strlcpy(info.shared_info->device_identifier, info.device_identifier,
		sizeof(info.shared_info->device_identifier));
#else
	strcpy(info.shared_info->device_identifier, info.device_identifier);
#endif

	// setup overlay registers

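	// Older chipsets program the overlay engine with a physical address,
	// newer ones with a GTT offset; intel_uses_physical_overlay() decides
	// whether the allocation needs a known physical address
	// (B_APERTURE_NEED_PHYSICAL).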
	status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0,
		intel_uses_physical_overlay(*info.shared_info)
				? B_APERTURE_NEED_PHYSICAL : 0,
		(addr_t*)&info.overlay_registers,
		&info.shared_info->physical_overlay_registers);
	if (status == B_OK) {
		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
			- info.aperture_base;
		init_overlay_registers(info.overlay_registers);
	} else {
		ERROR("error: could not allocate overlay memory! %s\n",
			strerror(status));
	}

	// Allocate hardware status page and the cursor memory

	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)info.shared_info->status_page,
			&info.shared_info->physical_status_page) == B_OK) {
		// TODO: set status page
	}
	if (hardwareCursor) {
		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->cursor_memory,
			&info.shared_info->physical_cursor_memory);
	}

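	// The boot loader may have stored the EDID it obtained via VESA; keep a
	// copy in the shared info so the accelerant can fall back to it when the
	// displays cannot be probed directly.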
	edid1_info* edidInfo = (edid1_info*)get_boot_item(VESA_EDID_BOOT_INFO,
		NULL);
	if (edidInfo != NULL) {
		info.shared_info->has_vesa_edid_info = true;
		memcpy(&info.shared_info->vesa_edid_info, edidInfo, sizeof(edid1_info));
	}

	init_interrupt_handler(info);

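	// Determine the FDI link frequency (in MHz): Ironlake (generation 5)
	// reports it via FDI_PLL_BIOS_0 in 100 MHz units, while later PCH
	// platforms use a fixed 2.7 GHz link.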
	if (hasPCH) {
		if (info.device_type.Generation() == 5) {
			info.shared_info->fdi_link_frequency = (read32(info, FDI_PLL_BIOS_0)
				& FDI_PLL_FB_CLOCK_MASK) + 2;
			info.shared_info->fdi_link_frequency *= 100;
		} else {
			info.shared_info->fdi_link_frequency = 2700;
		}
	} else {
		info.shared_info->fdi_link_frequency = 0;
	}

	TRACE("%s: completed successfully!\n", __func__);
	return B_OK;
}


void
intel_extreme_uninit(intel_info &info)
{
	CALLED();

	if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) {
		bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

		// disable interrupt generation
		if (hasPCH) {
			write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
			write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);
		} else {
			write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
			write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);
		}

		remove_io_interrupt_handler(info.irq, intel_interrupt_handler, &info);

		if (info.use_msi && gPCIx86Module != NULL) {
			gPCIx86Module->disable_msi(info.pci->bus,
				info.pci->device, info.pci->function);
			gPCIx86Module->unconfigure_msi(info.pci->bus,
				info.pci->device, info.pci->function);
		}
	}

	gGART->unmap_aperture(info.aperture);

	delete_area(info.registers_area);
	delete_area(info.shared_area);
}
