xref: /haiku/src/add-ons/kernel/drivers/graphics/intel_extreme/intel_extreme.cpp (revision 02b72520b683730ce13b30416576ded451fb2843)
/*
 * Copyright 2006-2014, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 */


#include "intel_extreme.h"

#include "AreaKeeper.h"
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include <boot_item.h>
#include <driver_settings.h>
#include <util/kernel_cpp.h>

#include <vesa_info.h>

#include "driver.h"
#include "power.h"
#include "utility.h"


#define TRACE_INTELEXTREME
#ifdef TRACE_INTELEXTREME
#	define TRACE(x...) dprintf("intel_extreme: " x)
#else
#	define TRACE(x...) ;
#endif

#define ERROR(x...) dprintf("intel_extreme: " x)
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)


static void
init_overlay_registers(overlay_registers* registers)
{
	memset(registers, 0, B_PAGE_SIZE);

	registers->contrast_correction = 0x48;
	registers->saturation_cos_correction = 0x9a;
		// this bypasses contrast and saturation correction
}


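// Evaluates the optional "intel_extreme" driver settings file. Only the
// "hardware_cursor" boolean is read at the moment: it defaults to true when a
// settings file exists, and to false when there is none. A minimal settings
// file (assuming the usual location under ~/config/settings/kernel/drivers)
// would simply contain:
//
//	hardware_cursor true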
static void
read_settings(bool &hardwareCursor)
{
	hardwareCursor = false;

	void* settings = load_driver_settings("intel_extreme");
	if (settings != NULL) {
		hardwareCursor = get_driver_boolean_parameter(settings,
			"hardware_cursor", true, true);

		unload_driver_settings(settings);
	}
}


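// Called on a vblank event from the interrupt handler: if any threads are
// blocked on the shared vblank semaphore (its count is negative), release it
// once per waiter without rescheduling right away, and tell the kernel to
// invoke the scheduler once the handler returns.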
static int32
release_vblank_sem(intel_info &info)
{
	int32 count;
	if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK
		&& count < 0) {
		release_sem_etc(info.shared_info->vblank_sem, -count,
			B_DO_NOT_RESCHEDULE);
		return B_INVOKE_SCHEDULER;
	}

	return B_HANDLED_INTERRUPT;
}


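// Interrupt handler: reads the interrupt identity register, handles any
// pending pipe A/B vblank interrupts (the bit layout differs between chips
// without a PCH, Sandy Bridge, and the later PCH based generations), and then
// acknowledges what was handled by writing the bits back, as the identity
// register is write-one-to-clear.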
static int32
intel_interrupt_handler(void* data)
{
	intel_info &info = *(intel_info*)data;
	uint32 reg = find_reg(info, INTEL_INTERRUPT_IDENTITY);
	uint16 identity = read16(info, reg);
	if (identity == 0)
		return B_UNHANDLED_INTERRUPT;

	int32 handled = B_HANDLED_INTERRUPT;

	while (identity != 0) {

		// TODO: verify that these aren't actually the same
		bool hasPCH = info.device_type.HasPlatformControlHub();
		uint16 mask;

		// Intel changed the PCH register mapping between Sandy Bridge and the
		// later generations (Ivy Bridge and up).
		if (info.device_type.InFamily(INTEL_TYPE_SNB)) {
			mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEA_SNB
				: INTERRUPT_VBLANK_PIPEA;
			if ((identity & mask) != 0) {
				handled = release_vblank_sem(info);

				// make sure we'll get another one of those
				write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
					DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			}

			mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEB_SNB
				: INTERRUPT_VBLANK_PIPEB;
			if ((identity & mask) != 0) {
				handled = release_vblank_sem(info);

				// make sure we'll get another one of those
				write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
					DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			}
		} else {
			mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEA
				: INTERRUPT_VBLANK_PIPEA;
			if ((identity & mask) != 0) {
				handled = release_vblank_sem(info);

				// make sure we'll get another one of those
				write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
					DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			}

			mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEB
				: INTERRUPT_VBLANK_PIPEB;
			if ((identity & mask) != 0) {
				handled = release_vblank_sem(info);

				// make sure we'll get another one of those
				write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
					DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			}

#if 0
			// FIXME we don't have support for the 3rd pipe yet
			mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEC
				: 0;
			if ((identity & mask) != 0) {
				handled = release_vblank_sem(info);

				// make sure we'll get another one of those
				write32(info, INTEL_DISPLAY_C_PIPE_STATUS,
					DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			}
#endif
		}

		// setting the bit clears it!
		write16(info, reg, identity);
		identity = read16(info, reg);
	}

	return handled;
}


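// Creates the vblank semaphore shared with the accelerant, transfers its
// ownership to the calling team (normally the app_server), selects an
// interrupt vector (preferring MSI when the PCI x86 module offers one),
// installs intel_interrupt_handler() and unmasks the pipe A/B vblank
// interrupts. On failure the semaphore is invalidated again, since faking
// vblank via a timer is not implemented yet.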
static void
init_interrupt_handler(intel_info &info)
{
	info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank");
	if (info.shared_info->vblank_sem < B_OK)
		return;

	status_t status = B_OK;

	// We need to change the owner of the sem to the calling team (usually the
	// app_server), because userland apps cannot acquire kernel semaphores
	thread_id thread = find_thread(NULL);
	thread_info threadInfo;
	if (get_thread_info(thread, &threadInfo) != B_OK
		|| set_sem_owner(info.shared_info->vblank_sem, threadInfo.team)
			!= B_OK) {
		status = B_ERROR;
	}

	// Find the right interrupt vector, using MSIs if available.
	info.irq = 0xff;
	info.use_msi = false;
	if (info.pci->u.h0.interrupt_pin != 0x00)
		info.irq = info.pci->u.h0.interrupt_line;
	if (gPCIx86Module != NULL && gPCIx86Module->get_msi_count(info.pci->bus,
			info.pci->device, info.pci->function) >= 1) {
		uint8 msiVector = 0;
		if (gPCIx86Module->configure_msi(info.pci->bus, info.pci->device,
				info.pci->function, 1, &msiVector) == B_OK
			&& gPCIx86Module->enable_msi(info.pci->bus, info.pci->device,
				info.pci->function) == B_OK) {
			ERROR("using message signaled interrupts\n");
			info.irq = msiVector;
			info.use_msi = true;
		}
	}

	if (status == B_OK && info.irq != 0xff) {
		// we've gotten an interrupt line for us to use

		info.fake_interrupts = false;

		status = install_io_interrupt_handler(info.irq,
			&intel_interrupt_handler, (void*)&info, 0);
		if (status == B_OK) {
			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);

			write16(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);

			// enable interrupts - we only want VBLANK interrupts
			bool hasPCH = info.device_type.HasPlatformControlHub();
			uint16 enable = hasPCH
				? (PCH_INTERRUPT_VBLANK_PIPEA | PCH_INTERRUPT_VBLANK_PIPEB)
				: (INTERRUPT_VBLANK_PIPEA | INTERRUPT_VBLANK_PIPEB);

			write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), enable);
			write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~enable);
		}
	}
	if (status < B_OK) {
		// There is no interrupt reserved for us, or we couldn't install our
		// interrupt handler; fake the vblank interrupt for our clients using
		// a timer interrupt instead.
		info.fake_interrupts = true;

		// TODO: fake interrupts!
		TRACE("Fake interrupt mode (no PCI interrupt line assigned)\n");
		status = B_ERROR;
	}

	if (status < B_OK) {
		delete_sem(info.shared_info->vblank_sem);
		info.shared_info->vblank_sem = B_ERROR;
	}
}


//	#pragma mark -


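// The two helpers below are thin wrappers around the GART module: graphics
// memory is handed out of (and returned to) the GTT aperture mapped in
// intel_extreme_init(), optionally together with a physical address for
// hardware that needs one (overlay, cursor, hardware status page).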
status_t
intel_free_memory(intel_info &info, addr_t base)
{
	return gGART->free_memory(info.aperture, base);
}


status_t
intel_allocate_memory(intel_info &info, size_t size, size_t alignment,
	uint32 flags, addr_t* _base, phys_addr_t* _physicalBase)
{
	return gGART->allocate_memory(info.aperture, size, alignment,
		flags, _base, _physicalBase);
}


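// Kernel side initialization: maps the GTT aperture and the MMIO registers,
// creates the shared info area used by the accelerant, reserves ring buffer,
// overlay, hardware status page and cursor memory out of the aperture, and
// finally installs the vblank interrupt handler.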
status_t
intel_extreme_init(intel_info &info)
{
	CALLED();
	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
		info.pci->function, 0, &info.aperture_base);
	if (info.aperture < B_OK) {
		ERROR("error: could not map GART aperture! (%s)\n",
			strerror(info.aperture));
		return info.aperture;
	}

	AreaKeeper sharedCreator;
	info.shared_area = sharedCreator.Create("intel extreme shared info",
		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
		B_FULL_LOCK, 0);
	if (info.shared_area < B_OK) {
		ERROR("error: could not create shared area!\n");
		gGART->unmap_aperture(info.aperture);
		return info.shared_area;
	}

	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));

	int fbIndex = 0;
	int mmioIndex = 1;
	if (info.device_type.InFamily(INTEL_TYPE_9xx)) {
		// For some reason Intel saw the need to change the order of the
		// mappings with the introduction of the i9xx family
		mmioIndex = 0;
		fbIndex = 2;
	}
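	// That is: on 9xx and later the MMIO registers live in BAR 0 and the
	// graphics aperture in BAR 2, while the older chips expose the frame
	// buffer in BAR 0 and the registers in BAR 1.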

	// evaluate driver settings, if any

	bool hardwareCursor;
	read_settings(hardwareCursor);

	// memory mapped I/O

	// TODO: registers are mapped twice (by us and intel_gart), maybe we
	// can share the mapping between the drivers
	AreaKeeper mmioMapper;
	info.registers_area = mmioMapper.Map("intel extreme mmio",
		info.pci->u.h0.base_registers[mmioIndex],
		info.pci->u.h0.base_register_sizes[mmioIndex],
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void**)&info.registers);
	if (mmioMapper.InitCheck() < B_OK) {
		ERROR("error: could not map memory I/O!\n");
		gGART->unmap_aperture(info.aperture);
		return info.registers_area;
	}

	uint32* blocks = info.shared_info->register_blocks;
	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;

	// set up the register blocks for the different architectures
	if (info.device_type.HasPlatformControlHub()) {
		// PCH based platforms (IronLake and up)
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= PCH_NORTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= PCH_SOUTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
	} else {
		// (G)MCH/ICH based platforms
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= MCH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= MCH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= MCH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= ICH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= ICH_PORT_REGISTER_BASE;
	}
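	// The block bases stored above are exported through the shared info, so
	// register accesses can be composed as block base + offset; this hides
	// the MCH/ICH vs. PCH layout differences from the rest of the driver and
	// from the accelerant.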

	// make sure bus master, memory-mapped I/O, and frame buffer access are
	// enabled
	set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci,
		PCI_command, 2) | PCI_command_io | PCI_command_memory
		| PCI_command_master);

	// Reserve ring buffer memory. It is currently placed in graphics memory,
	// which could cause problems with write combining...

	ring_buffer &primary = info.shared_info->primary_ring_buffer;
	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
			(addr_t*)&primary.base) == B_OK) {
		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
		primary.size = 16 * B_PAGE_SIZE;
		primary.offset = (addr_t)primary.base - info.aperture_base;
	}

	// Enable clock gating
	intel_en_gating(info);

	// Enable automatic GPU downclocking, if we can, to save power
	intel_en_downclock(info);

	// no errors, so keep areas and mappings
	sharedCreator.Detach();
	mmioMapper.Detach();
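	// (AreaKeeper deletes its area again when it goes out of scope, so
	// detaching here is what actually keeps the shared info and MMIO
	// mappings alive beyond this function.)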

	aperture_info apertureInfo;
	gGART->get_aperture_info(info.aperture, &apertureInfo);

	info.shared_info->registers_area = info.registers_area;
	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
	info.shared_info->graphics_memory_size = apertureInfo.size;
	info.shared_info->frame_buffer = 0;
	info.shared_info->dpms_mode = B_DPMS_ON;

	info.shared_info->got_vbt = get_lvds_mode_from_bios(
		&info.shared_info->current_mode);
	// at least the 855gm can't drive more than one head at a time
	if (info.device_type.InFamily(INTEL_TYPE_8xx))
		info.shared_info->single_head_locked = 1;

	if (info.device_type.InFamily(INTEL_TYPE_9xx)) {
		info.shared_info->pll_info.reference_frequency = 96000;	// 96 MHz
		info.shared_info->pll_info.max_frequency = 400000;
			// 400 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else {
		info.shared_info->pll_info.reference_frequency = 48000;	// 48 MHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	}
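	// All PLL frequencies above are stored in kHz; the accelerant's mode
	// setting code uses these limits when searching for suitable display PLL
	// divisors.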

	info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0;

	info.shared_info->device_type = info.device_type;
#ifdef __HAIKU__
	strlcpy(info.shared_info->device_identifier, info.device_identifier,
		sizeof(info.shared_info->device_identifier));
#else
	strcpy(info.shared_info->device_identifier, info.device_identifier);
#endif

	// set up overlay registers

	status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0,
		intel_uses_physical_overlay(*info.shared_info)
				? B_APERTURE_NEED_PHYSICAL : 0,
		(addr_t*)&info.overlay_registers,
		&info.shared_info->physical_overlay_registers);
	if (status == B_OK) {
		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
			- info.aperture_base;
		init_overlay_registers(info.overlay_registers);
	} else {
		ERROR("error: could not allocate overlay memory! %s\n",
			strerror(status));
	}

	// Allocate hardware status page and the cursor memory

	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)info.shared_info->status_page,
			&info.shared_info->physical_status_page) == B_OK) {
		// TODO: set status page
	}
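	// Both the status page and the cursor bitmap below are handed to the
	// hardware by physical address, which is why these allocations pass
	// B_APERTURE_NEED_PHYSICAL.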
	if (hardwareCursor) {
		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->cursor_memory,
			&info.shared_info->physical_cursor_memory);
	}

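	// If the boot loader left VESA EDID information behind, copy it into the
	// shared info so the accelerant can fall back to it when it cannot read
	// EDID from the displays itself.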
	edid1_info* edidInfo = (edid1_info*)get_boot_item(VESA_EDID_BOOT_INFO,
		NULL);
	if (edidInfo != NULL) {
		info.shared_info->has_vesa_edid_info = true;
		memcpy(&info.shared_info->vesa_edid_info, edidInfo, sizeof(edid1_info));
	}

	init_interrupt_handler(info);

	TRACE("%s: completed successfully!\n", __func__);
	return B_OK;
}


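// Tears down the resources acquired in intel_extreme_init(): disables
// interrupt generation and removes the interrupt handler (including MSI,
// when it was used), unmaps the GTT aperture, and deletes the register and
// shared info areas.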
void
intel_extreme_uninit(intel_info &info)
{
	CALLED();

	if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) {
		// disable interrupt generation
		write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
		write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);

		remove_io_interrupt_handler(info.irq, intel_interrupt_handler, &info);

		if (info.use_msi && gPCIx86Module != NULL) {
			gPCIx86Module->disable_msi(info.pci->bus,
				info.pci->device, info.pci->function);
			gPCIx86Module->unconfigure_msi(info.pci->bus,
				info.pci->device, info.pci->function);
		}
	}

	gGART->unmap_aperture(info.aperture);

	delete_area(info.registers_area);
	delete_area(info.shared_area);
}