xref: /haiku/src/add-ons/kernel/drivers/graphics/intel_extreme/intel_extreme.cpp (revision 19ae20e67e91fc09cc9fc5c0e60e21e24e7a53eb)
1 /*
2  * Copyright 2006-2010, Haiku, Inc. All Rights Reserved.
3  * Distributed under the terms of the MIT License.
4  *
5  * Authors:
6  *		Axel Dörfler, axeld@pinc-software.de
7  */
8 
9 
10 #include "intel_extreme.h"
11 
12 #include "AreaKeeper.h"
13 #include "driver.h"
14 #include "utility.h"
15 
16 #include <unistd.h>
17 #include <stdio.h>
18 #include <string.h>
19 #include <errno.h>
20 
21 #include <driver_settings.h>
22 #include <util/kernel_cpp.h>
23 
24 
25 #define TRACE_DEVICE
26 #ifdef TRACE_DEVICE
27 #	define TRACE(x) dprintf x
28 #else
29 #	define TRACE(x) ;
30 #endif
31 
32 
33 static void
34 init_overlay_registers(overlay_registers* registers)
35 {
36 	memset(registers, 0, B_PAGE_SIZE);
37 
38 	registers->contrast_correction = 0x48;
39 	registers->saturation_cos_correction = 0x9a;
40 		// this by-passes contrast and saturation correction
41 }
42 
43 
44 static void
45 read_settings(bool &hardwareCursor)
46 {
47 	hardwareCursor = false;
48 
49 	void* settings = load_driver_settings("intel_extreme");
50 	if (settings != NULL) {
51 		hardwareCursor = get_driver_boolean_parameter(settings,
52 			"hardware_cursor", true, true);
53 
54 		unload_driver_settings(settings);
55 	}
56 }
57 
58 
59 static int32
60 release_vblank_sem(intel_info &info)
61 {
62 	int32 count;
63 	if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK
64 		&& count < 0) {
65 		release_sem_etc(info.shared_info->vblank_sem, -count,
66 			B_DO_NOT_RESCHEDULE);
67 		return B_INVOKE_SCHEDULER;
68 	}
69 
70 	return B_HANDLED_INTERRUPT;
71 }
72 
73 
74 static int32
75 intel_interrupt_handler(void* data)
76 {
77 	intel_info &info = *(intel_info*)data;
78 
79 	uint16 identity = read16(info, find_reg(info, INTEL_INTERRUPT_IDENTITY));
80 	if (identity == 0)
81 		return B_UNHANDLED_INTERRUPT;
82 
83 	int32 handled = B_HANDLED_INTERRUPT;
84 
85 	// TODO: verify that these aren't actually the same
86 	bool hasPCH = info.device_type.HasPlatformControlHub();
87 	uint16 mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEA : INTERRUPT_VBLANK_PIPEA;
88 	if ((identity & mask) != 0) {
89 		handled = release_vblank_sem(info);
90 
91 		// make sure we'll get another one of those
92 		write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
93 			DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
94 	}
95 
96 	mask = hasPCH ? PCH_INTERRUPT_VBLANK_PIPEB : INTERRUPT_VBLANK_PIPEB;
97 	if ((identity & mask) != 0) {
98 		handled = release_vblank_sem(info);
99 
100 		// make sure we'll get another one of those
101 		write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
102 			DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
103 	}
104 
105 	// setting the bit clears it!
106 	write16(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), identity);
107 
108 	return handled;
109 }
110 
111 
112 static void
113 init_interrupt_handler(intel_info &info)
114 {
115 	info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank");
116 	if (info.shared_info->vblank_sem < B_OK)
117 		return;
118 
119 	status_t status = B_OK;
120 
121 	// We need to change the owner of the sem to the calling team (usually the
122 	// app_server), because userland apps cannot acquire kernel semaphores
123 	thread_id thread = find_thread(NULL);
124 	thread_info threadInfo;
125 	if (get_thread_info(thread, &threadInfo) != B_OK
126 		|| set_sem_owner(info.shared_info->vblank_sem, threadInfo.team)
127 			!= B_OK) {
128 		status = B_ERROR;
129 	}
130 
131 	if (status == B_OK && info.pci->u.h0.interrupt_pin != 0x00
132 		&& info.pci->u.h0.interrupt_line != 0xff) {
133 		// we've gotten an interrupt line for us to use
134 
135 		info.fake_interrupts = false;
136 
137 		status = install_io_interrupt_handler(info.pci->u.h0.interrupt_line,
138 			&intel_interrupt_handler, (void*)&info, 0);
139 		if (status == B_OK) {
140 			write32(info, INTEL_DISPLAY_A_PIPE_STATUS,
141 				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
142 			write32(info, INTEL_DISPLAY_B_PIPE_STATUS,
143 				DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED);
144 
145 			write16(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0);
146 
147 			// enable interrupts - we only want VBLANK interrupts
148 			bool hasPCH = info.device_type.HasPlatformControlHub();
149 			uint16 enable = hasPCH
150 				? (PCH_INTERRUPT_VBLANK_PIPEA | PCH_INTERRUPT_VBLANK_PIPEB)
151 				: (INTERRUPT_VBLANK_PIPEA | INTERRUPT_VBLANK_PIPEB);
152 
153 			write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), enable);
154 			write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~enable);
155 		}
156 	}
157 	if (status < B_OK) {
158 		// There is no interrupt reserved for us, or we couldn't install our
159 		// interrupt handler, let's fake the vblank interrupt for our clients
160 		// using a timer interrupt
161 		info.fake_interrupts = true;
162 
163 		// TODO: fake interrupts!
164 		TRACE((DEVICE_NAME "Fake interrupt mode (no PCI interrupt line "
165 			"assigned)"));
166 		status = B_ERROR;
167 	}
168 
169 	if (status < B_OK) {
170 		delete_sem(info.shared_info->vblank_sem);
171 		info.shared_info->vblank_sem = B_ERROR;
172 	}
173 }
174 
175 
176 //	#pragma mark -
177 
178 
179 status_t
180 intel_free_memory(intel_info &info, addr_t base)
181 {
182 	return gGART->free_memory(info.aperture, base);
183 }
184 
185 
186 status_t
187 intel_allocate_memory(intel_info &info, size_t size, size_t alignment,
188 	uint32 flags, addr_t* _base, phys_addr_t* _physicalBase)
189 {
190 	return gGART->allocate_memory(info.aperture, size, alignment,
191 		flags, _base, _physicalBase);
192 }
193 
194 
/*!	Main hardware initialization: maps the aperture and MMIO registers,
	creates the shared-info area for the accelerant, programs the register
	block offsets for the detected architecture, applies chip-specific
	clock gating quirks, allocates ring buffer/overlay/status-page/cursor
	memory, and installs the vblank interrupt handler.
	On failure, everything acquired so far is released (AreaKeeper
	destructors clean up areas that were not Detach()ed).
*/
status_t
intel_extreme_init(intel_info &info)
{
	// Map the graphics aperture through the GART module; aperture_base
	// receives the kernel virtual base address of the mapping.
	info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device,
		info.pci->function, 0, &info.aperture_base);
	if (info.aperture < B_OK)
		return info.aperture;

	// Shared info area: one page-rounded intel_shared_info plus three extra
	// pages (presumably for additional shared data — see intel_extreme.h).
	AreaKeeper sharedCreator;
	info.shared_area = sharedCreator.Create("intel extreme shared info",
		(void**)&info.shared_info, B_ANY_KERNEL_ADDRESS,
		ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE,
		B_FULL_LOCK, 0);
	if (info.shared_area < B_OK) {
		gGART->unmap_aperture(info.aperture);
		return info.shared_area;
	}

	memset((void*)info.shared_info, 0, sizeof(intel_shared_info));

	// PCI BAR indices for the frame buffer and the MMIO registers.
	int fbIndex = 0;
	int mmioIndex = 1;
	if (info.device_type.InFamily(INTEL_TYPE_9xx)) {
		// For some reason Intel saw the need to change the order of the
		// mappings with the introduction of the i9xx family
		mmioIndex = 0;
		fbIndex = 2;
	}

	// evaluate driver settings, if any

	bool hardwareCursor;
	read_settings(hardwareCursor);

	// memory mapped I/O

	// TODO: registers are mapped twice (by us and intel_gart), maybe we
	// can share it between the drivers

	AreaKeeper mmioMapper;
	info.registers_area = mmioMapper.Map("intel extreme mmio",
		(void*)info.pci->u.h0.base_registers[mmioIndex],
		info.pci->u.h0.base_register_sizes[mmioIndex],
		B_ANY_KERNEL_ADDRESS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void**)&info.registers);
	if (mmioMapper.InitCheck() < B_OK) {
		// sharedCreator's destructor deletes the shared area on this path
		dprintf(DEVICE_NAME ": could not map memory I/O!\n");
		gGART->unmap_aperture(info.aperture);
		return info.registers_area;
	}

	// Publish the per-architecture register block base offsets so the
	// accelerant can address registers symbolically.
	uint32* blocks = info.shared_info->register_blocks;
	blocks[REGISTER_BLOCK(REGS_FLAT)] = 0;

	// setup the register blocks for the different architectures
	if (info.device_type.HasPlatformControlHub()) {
		// PCH based platforms (IronLake and up)
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= PCH_NORTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= PCH_NORTH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= PCH_SOUTH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE;
	} else {
		// (G)MCH/ICH based platforms
		blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]
			= MCH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]
			= MCH_PIPE_AND_PORT_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]
			= MCH_PLANE_CONTROL_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]
			= ICH_SHARED_REGISTER_BASE;
		blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]
			= ICH_PORT_REGISTER_BASE;
	}

	// make sure bus master, memory-mapped I/O, and frame buffer is enabled
	set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci,
		PCI_command, 2) | PCI_command_io | PCI_command_memory
		| PCI_command_master);

	// reserve ring buffer memory (currently, this memory is placed in
	// the graphics memory), but this could bring us problems with
	// write combining...

	// NOTE(review): a failed allocation is silently ignored here; the ring
	// buffer then stays zeroed from the memset above — confirm callers
	// tolerate size == 0.
	ring_buffer &primary = info.shared_info->primary_ring_buffer;
	if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0,
			(addr_t*)&primary.base) == B_OK) {
		primary.register_base = INTEL_PRIMARY_RING_BUFFER;
		primary.size = 16 * B_PAGE_SIZE;
		primary.offset = (addr_t)primary.base - info.aperture_base;
	}

	// Clock gating
	// Fix some problems on certain chips (taken from X driver)
	// TODO: clean this up
	if (info.pci->device_id == 0x2a02 || info.pci->device_id == 0x2a12) {
		dprintf("i965GM/i965GME quirk\n");
		write32(info, 0x6204, (1L << 29));
	} else if (info.device_type.InGroup(INTEL_TYPE_SNB)) {
		dprintf("SandyBridge clock gating\n");
		write32(info, 0x42020, (1L << 28) | (1L << 7) | (1L << 5));
	} else if (info.device_type.InGroup(INTEL_TYPE_ILK)) {
		dprintf("IronLake clock gating\n");
		write32(info, 0x42020, (1L << 7) | (1L << 5));
	} else if (info.device_type.InGroup(INTEL_TYPE_G4x)) {
		dprintf("G4x clock gating\n");
		write32(info, 0x6204, 0);
		write32(info, 0x6208, (1L << 9) | (1L << 7) | (1L << 6));
		write32(info, 0x6210, 0);

		uint32 gateValue = (1L << 28) | (1L << 3) | (1L << 2);
		if ((info.device_type.type & INTEL_TYPE_MOBILE) == INTEL_TYPE_MOBILE) {
			dprintf("G4x mobile clock gating\n");
		    gateValue |= 1L << 18;
		}
		write32(info, 0x6200, gateValue);
	} else {
		dprintf("i965 quirk\n");
		write32(info, 0x6204, (1L << 29) | (1L << 23));
	}
	// NOTE(review): magic register 0x7408 and the gating offsets above come
	// from the X.org driver; meanings are undocumented here.
	write32(info, 0x7408, 0x10);

	// no errors, so keep areas and mappings
	sharedCreator.Detach();
	mmioMapper.Detach();

	aperture_info apertureInfo;
	gGART->get_aperture_info(info.aperture, &apertureInfo);

	info.shared_info->registers_area = info.registers_area;
	info.shared_info->graphics_memory = (uint8*)info.aperture_base;
	info.shared_info->physical_graphics_memory = apertureInfo.physical_base;
	info.shared_info->graphics_memory_size = apertureInfo.size;
	info.shared_info->frame_buffer = 0;
	info.shared_info->dpms_mode = B_DPMS_ON;

	if (info.device_type.InFamily(INTEL_TYPE_9xx)) {
		info.shared_info->pll_info.reference_frequency = 96000;	// 96 kHz
		info.shared_info->pll_info.max_frequency = 400000;
			// 400 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 20000;		// 20 MHz
	} else {
		info.shared_info->pll_info.reference_frequency = 48000;	// 48 kHz
		info.shared_info->pll_info.max_frequency = 350000;
			// 350 MHz RAM DAC speed
		info.shared_info->pll_info.min_frequency = 25000;		// 25 MHz
	}

	info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0;

	info.shared_info->device_type = info.device_type;
#ifdef __HAIKU__
	strlcpy(info.shared_info->device_identifier, info.device_identifier,
		sizeof(info.shared_info->device_identifier));
#else
	strcpy(info.shared_info->device_identifier, info.device_identifier);
#endif

	// setup overlay registers

	if (intel_allocate_memory(info, B_PAGE_SIZE, 0,
			intel_uses_physical_overlay(*info.shared_info)
				? B_APERTURE_NEED_PHYSICAL : 0,
			(addr_t*)&info.overlay_registers,
			&info.shared_info->physical_overlay_registers) == B_OK) {
		info.shared_info->overlay_offset = (addr_t)info.overlay_registers
			- info.aperture_base;
	}

	// NOTE(review): this is called even when the allocation above failed,
	// in which case overlay_registers may be NULL/stale and the memset in
	// init_overlay_registers would fault — consider moving this inside the
	// success branch.
	init_overlay_registers(info.overlay_registers);

	// Allocate hardware status page and the cursor memory

	// NOTE(review): the cast passes the *value* of status_page rather than
	// its address (no '&') — verify against the field's declaration in
	// intel_extreme.h; if it is a plain addr_t this looks like a bug.
	if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)info.shared_info->status_page,
			&info.shared_info->physical_status_page) == B_OK) {
		// TODO: set status page
	}
	if (hardwareCursor) {
		intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL,
			(addr_t*)&info.shared_info->cursor_memory,
			&info.shared_info->physical_cursor_memory);
	}

	init_interrupt_handler(info);

	TRACE((DEVICE_NAME "_init() completed successfully!\n"));
	return B_OK;
}
390 
391 
392 void
393 intel_extreme_uninit(intel_info &info)
394 {
395 	TRACE((DEVICE_NAME": intel_extreme_uninit()\n"));
396 
397 	if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) {
398 		// disable interrupt generation
399 		write16(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0);
400 		write16(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0);
401 
402 		remove_io_interrupt_handler(info.pci->u.h0.interrupt_line,
403 			intel_interrupt_handler, &info);
404 	}
405 
406 	gGART->unmap_aperture(info.aperture);
407 
408 	delete_area(info.registers_area);
409 	delete_area(info.shared_area);
410 }
411 
412