/*
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Copyright 2004, Marcus Overhagen. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


extern "C" {
#include "device.h"
}

#include <cstdlib>

#include <arch/cpu.h>
#include <int.h>

extern "C" {
#include <compat/dev/pci/pcireg.h>
#include <compat/dev/pci/pcivar.h>
#include <compat/machine/resource.h>
#include <compat/sys/mutex.h>
#include <compat/machine/bus.h>
#include <compat/sys/rman.h>
#include <compat/sys/bus.h>
}


//#define DEBUG_BUS_SPACE_RW
#ifdef DEBUG_BUS_SPACE_RW
#	define TRACE_BUS_SPACE_RW(x) driver_printf x
#else
#	define TRACE_BUS_SPACE_RW(x)
#endif


struct internal_intr {
	device_t		dev;
	driver_filter_t* filter;
	driver_intr_t	*handler;
	void			*arg;
	int				irq;
	uint32			flags;

	thread_id		thread;
	sem_id			sem;
	int32			handling;
};

static int32 intr_wrapper(void *data);


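// Maps "size" bytes of physical memory starting at "_phy" into the kernel
// address space. The request is page-aligned internally and *virtualAddr is
// adjusted so it points at "_phy" itself. Returns the created area, or an
// error code (< B_OK) on failure.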
static area_id
map_mem(void **virtualAddr, phys_addr_t _phy, size_t size, uint32 protection,
	const char *name)
{
	uint32 offset = _phy & (B_PAGE_SIZE - 1);
	phys_addr_t physicalAddr = _phy - offset;
	area_id area;

	size = roundup(size + offset, B_PAGE_SIZE);
	area = map_physical_memory(name, physicalAddr, size, B_ANY_KERNEL_ADDRESS,
		protection, virtualAddr);
	if (area < B_OK)
		return area;

	*virtualAddr = (uint8 *)(*virtualAddr) + offset;

	return area;
}


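// Sets up "res" for the device's legacy (pin-based) interrupt, read from the
// PCI interrupt line register. Fails if no interrupt line is assigned.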
static int
bus_alloc_irq_resource(device_t dev, struct resource *res)
{
	uint8 irq = pci_read_config(dev, PCI_interrupt_line, 1);
	if (irq == 0 || irq == 0xff)
		return -1;

	res->r_bustag = BUS_SPACE_TAG_IRQ;
	res->r_bushandle = irq;
	return 0;
}


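// Maps the memory BAR at "bar_index" into the kernel address space and stores
// the mapping in "res". 64-bit BARs take their upper half from the following
// base register.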
static int
bus_alloc_mem_resource(device_t dev, struct resource *res, pci_info *info,
	int bar_index)
{
	phys_addr_t addr = info->u.h0.base_registers[bar_index];
	uint64 size = info->u.h0.base_register_sizes[bar_index];
	uchar flags = info->u.h0.base_register_flags[bar_index];

	// reject empty regions
	if (size == 0)
		return -1;

	// reject I/O space
	if ((flags & PCI_address_space) != 0)
		return -1;

	// TODO: check flags & PCI_address_prefetchable ?

	if ((flags & PCI_address_type) == PCI_address_type_64) {
		addr |= (uint64)info->u.h0.base_registers[bar_index + 1] << 32;
		size |= (uint64)info->u.h0.base_register_sizes[bar_index + 1] << 32;
	}

	// enable this I/O resource
	if (pci_enable_io(dev, SYS_RES_MEMORY) != 0)
		return -1;

	void *virtualAddr;

	res->r_mapped_area = map_mem(&virtualAddr, addr, size,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, "bus_alloc_resource(MEMORY)");
	if (res->r_mapped_area < B_OK)
		return -1;

	res->r_bustag = BUS_SPACE_TAG_MEM;
	res->r_bushandle = (bus_space_handle_t)virtualAddr;
	return 0;
}


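// Sets up "res" for the I/O port BAR at "bar_index"; the port base itself
// becomes the bus handle.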
static int
bus_alloc_ioport_resource(device_t dev, struct resource *res, pci_info *info,
	int bar_index)
{
	uint32 size = info->u.h0.base_register_sizes[bar_index];
	uchar flags = info->u.h0.base_register_flags[bar_index];

	// reject empty regions
	if (size == 0)
		return -1;

	// reject memory space
	if ((flags & PCI_address_space) == 0)
		return -1;

	// enable this I/O resource
	if (pci_enable_io(dev, SYS_RES_IOPORT) != 0)
		return -1;

	res->r_bustag = BUS_SPACE_TAG_IO;
	res->r_bushandle = info->u.h0.base_registers[bar_index];
	return 0;
}


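// Translates a BAR offset in PCI configuration space (the "rid") into an
// index into the pci_info base register arrays, or returns -1 if the offset
// does not name a BAR.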
static int
bus_register_to_bar_index(pci_info *info, int regid)
{
	// check the offset really is of a BAR
	if (regid < PCI_base_registers || (regid % sizeof(uint32) != 0)
		|| (regid >= PCI_base_registers + 6 * (int)sizeof(uint32))) {
		return -1;
	}

	// turn offset into array index
	regid -= PCI_base_registers;
	regid /= sizeof(uint32);
	return regid;
}


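// FreeBSD compatibility entry point: allocates an IRQ, memory, or I/O port
// resource for the device. For SYS_RES_IRQ, rid 0 means the legacy interrupt
// line, while rid N (> 0) refers to MSI/MSI-X vector N - 1. For memory and
// I/O port resources, the rid is the BAR's configuration space offset.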
struct resource *
bus_alloc_resource(device_t dev, int type, int *rid, unsigned long start,
	unsigned long end, unsigned long count, uint32 flags)
{
	struct resource *res;
	int result = -1;

	if (type != SYS_RES_IRQ && type != SYS_RES_MEMORY
		&& type != SYS_RES_IOPORT)
		return NULL;

	device_printf(dev, "bus_alloc_resource(%i, [%i], 0x%lx, 0x%lx, 0x%lx,"
		"0x%" B_PRIx32 ")\n", type, *rid, start, end, count, flags);

	// maybe a local array of resources is enough
	res = (struct resource *)malloc(sizeof(struct resource));
	if (res == NULL)
		return NULL;

	if (type == SYS_RES_IRQ) {
		if (*rid == 0) {
			// pinned interrupt
			result = bus_alloc_irq_resource(dev, res);
		} else {
			// msi or msi-x interrupt at index *rid - 1
			pci_info* info = get_device_pci_info(dev);
			res->r_bustag = BUS_SPACE_TAG_MSI;
			res->r_bushandle = info->u.h0.interrupt_line + *rid - 1;
			result = 0;
		}
	} else if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
		pci_info* info = get_device_pci_info(dev);
		int bar_index = bus_register_to_bar_index(info, *rid);
		if (bar_index >= 0) {
			if (type == SYS_RES_MEMORY)
				result = bus_alloc_mem_resource(dev, res, info, bar_index);
			else
				result = bus_alloc_ioport_resource(dev, res, info, bar_index);
		}
	}

	if (result < 0) {
		free(res);
		return NULL;
	}

	res->r_type = type;
	return res;
}


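// Releases a resource obtained from bus_alloc_resource(); memory resources
// additionally have their mapped area deleted.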
int
bus_release_resource(device_t dev, int type, int rid, struct resource *res)
{
	if (res->r_type != type)
		panic("bus_release_resource: mismatch");

	if (type == SYS_RES_MEMORY)
		delete_area(res->r_mapped_area);

	free(res);
	return 0;
}


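// Allocates every resource described in "resourceSpec" (the array is
// terminated by a type of -1). If a non-optional entry fails, everything
// allocated so far is released again and ENXIO is returned.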
int
bus_alloc_resources(device_t dev, struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		resources[i] = bus_alloc_resource_any(dev,
			resourceSpec[i].type, &resourceSpec[i].rid, resourceSpec[i].flags);
		if (resources[i] == NULL
			&& (resourceSpec[i].flags & RF_OPTIONAL) == 0) {
			for (++i; resourceSpec[i].type != -1; i++) {
				resources[i] = NULL;
			}

			bus_release_resources(dev, resourceSpec, resources);
			return ENXIO;
		}
	}
	return 0;
}


void
bus_release_resources(device_t dev, const struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		if (resources[i] == NULL)
			continue;

		bus_release_resource(dev, resourceSpec[i].type, resourceSpec[i].rid,
			resources[i]);
		resources[i] = NULL;
	}
}


bus_space_handle_t
rman_get_bushandle(struct resource *res)
{
	return res->r_bushandle;
}


bus_space_tag_t
rman_get_bustag(struct resource *res)
{
	return res->r_bustag;
}


int
rman_get_rid(struct resource *res)
{
	return 0;
}


void*
rman_get_virtual(struct resource *res)
{
	return NULL;
}


bus_addr_t
rman_get_start(struct resource *res)
{
	return res->r_bushandle;
}


bus_size_t
rman_get_size(struct resource *res)
{
	area_info info;
	if (get_area_info(res->r_mapped_area, &info) != B_OK)
		return 0;
	return info.size;
}


//	#pragma mark - Interrupt handling


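// Hardware interrupt hook used for handler-style (non-filter) interrupts:
// asks the driver to acknowledge/disable the device's interrupt and, if it
// was ours, wakes the handler thread via the semaphore.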
static int32
intr_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	//device_printf(intr->dev, "in interrupt handler.\n");

	if (!HAIKU_CHECK_DISABLE_INTERRUPTS(intr->dev))
		return B_UNHANDLED_INTERRUPT;

	release_sem_etc(intr->sem, 1, B_DO_NOT_RESCHEDULE);
	return intr->handling ? B_HANDLED_INTERRUPT : B_INVOKE_SCHEDULER;
}


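// Kernel thread that runs the driver's interrupt handler at thread level.
// It blocks on the semaphore released by intr_wrapper(), serializes with the
// Giant lock unless INTR_MPSAFE is set, and re-enables the device's interrupt
// afterwards. The thread exits once the semaphore is deleted.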
static int32
intr_handler(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;
	status_t status;

	while (1) {
		status = acquire_sem(intr->sem);
		if (status < B_OK)
			break;

		//device_printf(intr->dev, "in soft interrupt handler.\n");

		atomic_or(&intr->handling, 1);
		if ((intr->flags & INTR_MPSAFE) == 0)
			mtx_lock(&Giant);

		intr->handler(intr->arg);

		if ((intr->flags & INTR_MPSAFE) == 0)
			mtx_unlock(&Giant);
		atomic_and(&intr->handling, 0);
		HAIKU_REENABLE_INTERRUPTS(intr->dev);
	}

	return 0;
}


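// Frees the interrupt bookkeeping; deleting the semaphore makes
// intr_handler()'s acquire_sem() fail, so the handler thread exits and can
// be joined.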
static void
free_internal_intr(struct internal_intr *intr)
{
	if (intr->sem >= B_OK) {
		status_t status;
		delete_sem(intr->sem);
		wait_for_thread(intr->thread, &status);
	}

	free(intr);
}


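// Installs an interrupt handler for "res". A filter is installed directly as
// an I/O interrupt handler; a handler gets a semaphore plus a dedicated
// kernel thread (intr_handler) that is woken by intr_wrapper(). If the
// resource is an MSI/MSI-X vector, message-signaled interrupts are enabled
// on the device as well.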
int
bus_setup_intr(device_t dev, struct resource *res, int flags,
	driver_filter_t* filter, driver_intr_t handler, void *arg, void **_cookie)
{
	struct internal_intr *intr = (struct internal_intr *)malloc(
		sizeof(struct internal_intr));
	char semName[64];
	status_t status;

	if (intr == NULL)
		return B_NO_MEMORY;

	intr->dev = dev;
	intr->filter = filter;
	intr->handler = handler;
	intr->arg = arg;
	intr->irq = res->r_bushandle;
	intr->flags = flags;
	intr->sem = -1;
	intr->thread = -1;

	if (filter != NULL) {
		status = install_io_interrupt_handler(intr->irq,
			(interrupt_handler)intr->filter, intr->arg, 0);
	} else {
		snprintf(semName, sizeof(semName), "%s intr", dev->device_name);

		intr->sem = create_sem(0, semName);
		if (intr->sem < B_OK) {
			free(intr);
			return B_NO_MEMORY;
		}

		snprintf(semName, sizeof(semName), "%s intr handler", dev->device_name);

		intr->thread = spawn_kernel_thread(intr_handler, semName,
			B_REAL_TIME_DISPLAY_PRIORITY, intr);
		if (intr->thread < B_OK) {
			delete_sem(intr->sem);
			free(intr);
			return B_NO_MEMORY;
		}

		status = install_io_interrupt_handler(intr->irq,
			intr_wrapper, intr, 0);
	}

	if (status == B_OK && res->r_bustag == BUS_SPACE_TAG_MSI) {
		// this is an msi, enable it
		struct root_device_softc* root_softc = ((struct root_device_softc *)dev->root->softc);
		if (root_softc->is_msi) {
			if (gPci->enable_msi(root_softc->pci_info.bus, root_softc->pci_info.device,
					root_softc->pci_info.function) != B_OK) {
				device_printf(dev, "enabling msi failed\n");
				bus_teardown_intr(dev, res, intr);
				return ENODEV;
			}
		} else if (root_softc->is_msix) {
			if (gPci->enable_msix(root_softc->pci_info.bus, root_softc->pci_info.device,
					root_softc->pci_info.function) != B_OK) {
				device_printf(dev, "enabling msix failed\n");
				bus_teardown_intr(dev, res, intr);
				return ENODEV;
			}
		}
	}

	if (status < B_OK) {
		free_internal_intr(intr);
		return status;
	}

	resume_thread(intr->thread);

	*_cookie = intr;
	return 0;
}


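// Reverses bus_setup_intr(): disables MSI/MSI-X generation if it was used,
// removes the installed interrupt handler, and frees the interrupt state
// (stopping the handler thread, if any).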
int
bus_teardown_intr(device_t dev, struct resource *res, void *arg)
{
	struct internal_intr *intr = (struct internal_intr *)arg;
	if (intr == NULL)
		return -1;

	struct root_device_softc *root = (struct root_device_softc *)dev->root->softc;

	if (root->is_msi || root->is_msix) {
		// disable msi generation
		pci_info *info = &root->pci_info;
		gPci->disable_msi(info->bus, info->device, info->function);
	}

	if (intr->filter != NULL) {
		remove_io_interrupt_handler(intr->irq, (interrupt_handler)intr->filter,
			intr->arg);
	} else {
		remove_io_interrupt_handler(intr->irq, intr_wrapper, intr);
	}

	free_internal_intr(intr);
	return 0;
}


int
bus_bind_intr(device_t dev, struct resource *res, int cpu)
{
	if (dev->parent == NULL)
		return EINVAL;

	// TODO
	return 0;
}


int bus_describe_intr(device_t dev, struct resource *irq, void *cookie,
	const char* fmt, ...)
{
	if (dev->parent == NULL)
		return EINVAL;

	// we don't really support names for interrupts
	return 0;
}


//	#pragma mark - bus functions


bus_dma_tag_t
bus_get_dma_tag(device_t dev)
{
	return NULL;
}


int
bus_generic_suspend(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_resume(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_shutdown(device_t dev)
{
	UNIMPLEMENTED();
}


int
bus_print_child_header(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_print_child_footer(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_print_child(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
	UNIMPLEMENTED();
}


int
bus_child_present(device_t child)
{
	device_t parent = device_get_parent(child);
	if (parent == NULL)
		return 0;

	return bus_child_present(parent);
}


void
bus_enumerate_hinted_children(device_t bus)
{
#if 0
	UNIMPLEMENTED();
#endif
}