xref: /haiku/src/add-ons/kernel/drivers/graphics/intel_extreme/intel_extreme.cpp (revision bd6068614473f87449dfa2eaa67fad1527c61e11)
/*
 * Copyright 2006-2014, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 *		Alexander von Gluck IV, kallisti5@unixzen.com
 */


#include "intel_extreme.h"

#include "AreaKeeper.h"
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include <boot_item.h>
#include <driver_settings.h>
#include <util/kernel_cpp.h>

#include <vesa_info.h>

#include "driver.h"
#include "power.h"
#include "utility.h"


#define TRACE_INTELEXTREME
#ifdef TRACE_INTELEXTREME
#	define TRACE(x...) dprintf("intel_extreme: " x)
#else
#	define TRACE(x...) ;
#endif

#define ERROR(x...) dprintf("intel_extreme: " x)
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)


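/** Clear the overlay register block and preset contrast and saturation to
 * values that effectively bypass both correction stages.
 */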
static void
init_overlay_registers(overlay_registers* registers)
{
	memset(registers, 0, B_PAGE_SIZE);

	registers->contrast_correction = 0x48;
	registers->saturation_cos_correction = 0x9a;
		// this bypasses contrast and saturation correction
}


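/** Evaluate the "intel_extreme" driver settings file, if there is one.
 * The hardware cursor stays disabled unless a settings file exists and
 * does not turn "hardware_cursor" off.
 */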
static void
read_settings(bool &hardwareCursor)
{
	hardwareCursor = false;

	void* settings = load_driver_settings("intel_extreme");
	if (settings != NULL) {
		hardwareCursor = get_driver_boolean_parameter(settings,
			"hardware_cursor", true, true);

		unload_driver_settings(settings);
	}
}


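/** Release the vblank semaphore once for every thread currently waiting
 * on it. Called from the interrupt handler; returns B_INVOKE_SCHEDULER
 * when at least one waiter was woken up.
 */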
static int32
release_vblank_sem(intel_info &info)
{
	int32 count;
	if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK
		&& count < 0) {
		release_sem_etc(info.shared_info->vblank_sem, -count,
			B_DO_NOT_RESCHEDULE);
		return B_INVOKE_SCHEDULER;
	}

	return B_HANDLED_INTERRUPT;
}


/** Get the appropriate interrupt mask for enabling or testing interrupts on
 * the given pipes.
 *
 * The bits to test or set are different depending on the hardware generation.
 *
 * \param info Intel_extreme driver information
 * \param pipes bit mask of the pipes to use
 * \param enable true to get the mask for enabling the interrupts, false to get
 *               the mask for testing them.
 */
static uint32
intel_get_interrupt_mask(intel_info& info, int pipes, bool enable)
{
	uint32 mask = 0;
	bool hasPCH = info.pch_info != INTEL_PCH_NONE;

	// Intel changed the PCH register mapping between Sandy Bridge and the
	// later generations (Ivy Bridge and up).

	if ((pipes & INTEL_PIPE_A) != 0) {
		if (info.device_type.InGroup(INTEL_GROUP_SNB))
			mask |= PCH_INTERRUPT_VBLANK_PIPEA_SNB;
		else if (hasPCH)
			mask |= PCH_INTERRUPT_VBLANK_PIPEA;
		else
			mask |= INTERRUPT_VBLANK_PIPEA;
	}

	if ((pipes & INTEL_PIPE_B) != 0) {
		if (info.device_type.InGroup(INTEL_GROUP_SNB))
			mask |= PCH_INTERRUPT_VBLANK_PIPEB_SNB;
		else if (hasPCH)
			mask |= PCH_INTERRUPT_VBLANK_PIPEB;
		else
			mask |= INTERRUPT_VBLANK_PIPEB;
	}

	// On Sandy Bridge, there is an extra "global enable" flag, which must also
	// be set when enabling the interrupts (but not when testing for them).
	if (enable && info.device_type.InGroup(INTEL_GROUP_SNB))
		mask |= PCH_INTERRUPT_GLOBAL_SNB;

	return mask;
}


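/** Interrupt handler: acknowledges any pending vblank interrupts on pipes
 * A and B and wakes up waiting clients via the vblank semaphore. Note that
 * the interrupt identity register is 32 bits wide on PCH based hardware,
 * but only 16 bits wide on older generations.
 */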
static int32
intel_interrupt_handler(void* data)
{
	intel_info &info = *(intel_info*)data;
	uint32 reg = find_reg(info, INTEL_INTERRUPT_IDENTITY);
	bool hasPCH = (info.pch_info != INTEL_PCH_NONE);
	uint32 identity;

	if (hasPCH)
		identity = read32(info, reg);
	else
		identity = read16(info, reg);

	if (identity == 0)
		return B_UNHANDLED_INTERRUPT;

	int32 handled = B_HANDLED_INTERRUPT;

	while (identity != 0) {

		uint32 mask = intel_get_interrupt_mask(info, INTEL_PIPE_A, false);

		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}

		mask = intel_get_interrupt_mask(info, INTEL_PIPE_B, false);
		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}

#if 0
		// FIXME we don't have support for the 3rd pipe yet
		mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEC : 0;
		if ((identity & mask) != 0) {
			handled = release_vblank_sem(info);

			// make sure we'll get another one of those
			write32(info, INTEL_DISPLAY_C_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
		}
#endif

		// setting the bit clears it!
		if (hasPCH) {
			write32(info, reg, identity);
			identity = read32(info, reg);
		} else {
			write16(info, reg, identity);
			identity = read16(info, reg);
		}
	}

	return handled;
}


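/** Set up vblank interrupt delivery. This creates the vblank semaphore,
 * transfers its ownership to the calling team (usually the app_server),
 * installs the interrupt handler (preferring MSIs when available), and
 * enables the vblank interrupts on pipes A and B. If no usable interrupt
 * line is found, the semaphore is deleted again.
 */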
static void
init_interrupt_handler(intel_info &info)
{
	info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank");
	if (info.shared_info->vblank_sem < B_OK)
		return;

	status_t status = B_OK;

	// We need to change the owner of the sem to the calling team (usually the
	// app_server), because userland apps cannot acquire kernel semaphores
	thread_id thread = find_thread(NULL);
	thread_info threadInfo;
	if (get_thread_info(thread, &threadInfo) != B_OK
		|| set_sem_owner(info.shared_info->vblank_sem, threadInfo.team)
			!= B_OK) {
		status = B_ERROR;
	}

	// Find the right interrupt vector, using MSIs if available.
	info.irq = 0xff;
	info.use_msi = false;
	if (info.pci->u.h0.interrupt_pin != 0x00)
		info.irq = info.pci->u.h0.interrupt_line;
	if (gPCIx86Module != NULL && gPCIx86Module->get_msi_count(info.pci->bus,
			info.pci->device, info.pci->function) >= 1) {
		uint8 msiVector = 0;
		if (gPCIx86Module->configure_msi(info.pci->bus, info.pci->device,
				info.pci->function, 1, &msiVector) == B_OK
			&& gPCIx86Module->enable_msi(info.pci->bus, info.pci->device,
				info.pci->function) == B_OK) {
			ERROR("using message signaled interrupts\n");
			info.irq = msiVector;
			info.use_msi = true;
		}
	}

	if (status == B_OK && info.irq != 0xff) {
		// we've gotten an interrupt line for us to use

		info.fake_interrupts = false;

		status = install_io_interrupt_handler(info.irq,
			&intel_interrupt_handler, (void*)&info, 0);
		if (status == B_OK) {
			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);

			bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

			uint32 enable = intel_get_interrupt_mask(info,
				INTEL_PIPE_A | INTEL_PIPE_B, true);

			if (hasPCH) {
				// Clear all the interrupts
				write32(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);

				// enable interrupts - we only want VBLANK interrupts
				write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), enable);
				write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~enable);
			} else {
				// Clear all the interrupts
				write16(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);

				// enable interrupts - we only want VBLANK interrupts
				write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), enable);
				write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~enable);
			}

		}
	}
	if (status < B_OK) {
		// There is no interrupt reserved for us, or we couldn't install our
		// interrupt handler; let's fake the vblank interrupt for our clients
		// using a timer interrupt instead
		info.fake_interrupts = true;

		// TODO: fake interrupts!
		TRACE("Fake interrupt mode (no PCI interrupt line assigned)\n");
		status = B_ERROR;
	}

	if (status < B_OK) {
		delete_sem(info.shared_info->vblank_sem);
		info.shared_info->vblank_sem = B_ERROR;
	}
}


//	#pragma mark -


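/** Free a block of graphics memory that was previously allocated with
 * intel_allocate_memory().
 */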
status_t
intel_free_memory(intel_info &info, addr_t base)
{
	return gGART->free_memory(info.aperture, base);
}


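/** Allocate a block of graphics memory from the aperture; this simply
 * forwards to the GART bus manager, which manages the aperture for us.
 */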
status_t
intel_allocate_memory(intel_info &info, size_t size, size_t alignment,
	uint32 flags, addr_t* _base, phys_addr_t* _physicalBase)
{
	return gGART->allocate_memory(info.aperture, size, alignment,
		flags, _base, _physicalBase);
}


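/** Initialize the hardware for use by the accelerant: maps the GART
 * aperture and the MMIO registers, creates the shared info area, sets up
 * the register block offsets for the detected generation, allocates ring
 * buffer, overlay, status page and cursor memory, and installs the vblank
 * interrupt handler.
 */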
status_t
intel_extreme_init(intel_info &info)
{
	CALLED();
	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
		info.pci->function, 0, &info.aperture_base);
	if (info.aperture < B_OK) {
		ERROR("error: could not map GART aperture! (%s)\n",
			strerror(info.aperture));
		return info.aperture;
	}

	AreaKeeper sharedCreator;
	info.shared_area = sharedCreator.Create("intel extreme shared info",
		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
		B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_USER_CLONEABLE_AREA);
	if (info.shared_area < B_OK) {
		ERROR("error: could not create shared area!\n");
		gGART->unmap_aperture(info.aperture);
		return info.shared_area;
	}

	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));

	int fbIndex = 0;
	int mmioIndex = 1;
	if (info.device_type.Generation() >= 3) {
		// For some reason Intel saw the need to change the order of the
		// mappings with the introduction of the i9xx family
		mmioIndex = 0;
		fbIndex = 2;
	}

	// evaluate driver settings, if any

	bool hardwareCursor;
	read_settings(hardwareCursor);

	// memory mapped I/O

	// TODO: registers are mapped twice (by us and intel_gart), maybe we
	// can share it between the drivers

	AreaKeeper mmioMapper;
	info.registers_area = mmioMapper.Map("intel extreme mmio",
		info.pci->u.h0.base_registers[mmioIndex],
		info.pci->u.h0.base_register_sizes[mmioIndex],
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void**)&info.registers);
	if (mmioMapper.InitCheck() < B_OK) {
		ERROR("error: could not map memory I/O!\n");
		gGART->unmap_aperture(info.aperture);
		return info.registers_area;
	}

	bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

	ERROR("Init Intel generation %d GPU %s PCH split.\n",
		info.device_type.Generation(), hasPCH ? "with" : "without");

	uint32* blocks = info.shared_info->register_blocks;
	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;

	// setup the register blocks for the different architectures
	if (hasPCH) {
		// PCH based platforms (IronLake through ultra-low-power Broadwells)
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= PCH_NORTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= PCH_SOUTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
	} else {
		// (G)MCH/ICH based platforms
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= MCH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= MCH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= MCH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= ICH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= ICH_PORT_REGISTER_BASE;
	}

	// Everything in the display PRM gets +0x180000
	if (info.device_type.InGroup(INTEL_GROUP_VLV)) {
		// "I nearly got violent with the hw guys when they told me..."
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] += VLV_DISPLAY_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] += VLV_DISPLAY_BASE;
	}

	TRACE("REGS_NORTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]);
	TRACE("REGS_NORTH_PIPE_AND_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]);
	TRACE("REGS_NORTH_PLANE_CONTROL: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]);
	TRACE("REGS_SOUTH_SHARED: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]);
	TRACE("REGS_SOUTH_TRANSCODER_PORT: 0x%" B_PRIx32 "\n",
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]);

	// make sure bus master, memory-mapped I/O, and frame buffer are enabled
	set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci,
		PCI_command, 2) | PCI_command_io | PCI_command_memory
		| PCI_command_master);

	// reserve ring buffer memory (currently, this memory is placed in
	// the graphics memory), but this could cause problems with
	// write combining...

	ring_buffer &primary = info.shared_info->primary_ring_buffer;
	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
			(addr_t*)&primary.base) == B_OK) {
		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
		primary.size = 16 * B_PAGE_SIZE;
		primary.offset = (addr_t)primary.base - info.aperture_base;
	}

	// Enable clock gating
	intel_en_gating(info);

	// Enable automatic GPU downclocking if we can, to save power
	intel_en_downclock(info);

	// no errors, so keep areas and mappings
	sharedCreator.Detach();
	mmioMapper.Detach();

	aperture_info apertureInfo;
	gGART->get_aperture_info(info.aperture, &apertureInfo);

	info.shared_info->registers_area = info.registers_area;
	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
	info.shared_info->graphics_memory_size = apertureInfo.size;
	info.shared_info->frame_buffer = 0;
	info.shared_info->dpms_mode = B_DPMS_ON;

	// Pull VBIOS panel mode for later use
	info.shared_info->got_vbt = get_lvds_mode_from_bios(
		&info.shared_info->panel_mode);

	/* at least the 855GM can't drive more than one head at a time */
	if (info.device_type.InFamily(INTEL_FAMILY_8xx))
		info.shared_info->single_head_locked = 1;

	if (info.device_type.InFamily(INTEL_FAMILY_9xx)
		|| info.device_type.InFamily(INTEL_FAMILY_SER5)) {
		info.shared_info->pll_info.reference_frequency = 96000;	// 96 MHz
		info.shared_info->pll_info.max_frequency = 400000;
			// 400 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else {
		info.shared_info->pll_info.reference_frequency = 48000;	// 48 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	}

	info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0;

	info.shared_info->pch_info = info.pch_info;

	info.shared_info->device_type = info.device_type;
#ifdef __HAIKU__
	strlcpy(info.shared_info->device_identifier, info.device_identifier,
		sizeof(info.shared_info->device_identifier));
#else
	strcpy(info.shared_info->device_identifier, info.device_identifier);
#endif

	// setup overlay registers

	status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0,
		intel_uses_physical_overlay(*info.shared_info)
				? B_APERTURE_NEED_PHYSICAL : 0,
		(addr_t*)&info.overlay_registers,
		&info.shared_info->physical_overlay_registers);
	if (status == B_OK) {
		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
			- info.aperture_base;
		init_overlay_registers(info.overlay_registers);
	} else {
		ERROR("error: could not allocate overlay memory! %s\n",
			strerror(status));
	}

	// Allocate hardware status page and the cursor memory

	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)info.shared_info->status_page,
			&info.shared_info->physical_status_page) == B_OK) {
		// TODO: set status page
	}
	if (hardwareCursor) {
		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->cursor_memory,
			&info.shared_info->physical_cursor_memory);
	}

	edid1_info* edidInfo = (edid1_info*)get_boot_item(VESA_EDID_BOOT_INFO,
		NULL);
	if (edidInfo != NULL) {
		info.shared_info->has_vesa_edid_info = true;
		memcpy(&info.shared_info->vesa_edid_info, edidInfo, sizeof(edid1_info));
	}

	init_interrupt_handler(info);

	if (hasPCH) {
		if (info.device_type.Generation() == 5) {
			info.shared_info->fdi_link_frequency = (read32(info, FDI_PLL_BIOS_0)
				& FDI_PLL_FB_CLOCK_MASK) + 2;
			info.shared_info->fdi_link_frequency *= 100;
		} else {
			info.shared_info->fdi_link_frequency = 2700;
		}
	} else {
		info.shared_info->fdi_link_frequency = 0;
	}

	TRACE("%s: completed successfully!\n", __func__);
	return B_OK;
}


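/** Tear down what intel_extreme_init() set up: disable interrupt
 * generation, remove the interrupt handler (and release the MSI vector,
 * if one was used), unmap the GART aperture, and delete the register and
 * shared info areas.
 */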
void
intel_extreme_uninit(intel_info &info)
{
	CALLED();

	if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) {
		bool hasPCH = (info.pch_info != INTEL_PCH_NONE);

		// disable interrupt generation
		if (hasPCH) {
			write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
			write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);
		} else {
			write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
			write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);
		}

		remove_io_interrupt_handler(info.irq, intel_interrupt_handler, &info);

		if (info.use_msi && gPCIx86Module != NULL) {
			gPCIx86Module->disable_msi(info.pci->bus,
				info.pci->device, info.pci->function);
			gPCIx86Module->unconfigure_msi(info.pci->bus,
				info.pci->device, info.pci->function);
		}
	}

	gGART->unmap_aperture(info.aperture);

	delete_area(info.registers_area);
	delete_area(info.shared_area);
}

570