xref: /haiku/src/add-ons/kernel/busses/virtio/virtio_mmio/VirtioDevice.cpp (revision 9e25244c5e9051f6cd333820d6332397361abd6c)
/*
 * Copyright 2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include "VirtioDevice.h"

#include <malloc.h>
#include <string.h>
#include <new>

#include <KernelExport.h>
#include <kernel.h>
#include <debug.h>


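// Split a 64-bit value into the low/high halves of a 32-bit MMIO register pair.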
static inline void
SetLowHi(vuint32 &low, vuint32 &hi, uint64 val)
{
	low = (uint32)val;
	hi  = (uint32)(val >> 32);
}


// #pragma mark - VirtioQueue


VirtioQueue::VirtioQueue(VirtioDevice *dev, int32 id)
	:
	fDev(dev),
	fId(id),
	fQueueHandler(NULL),
	fQueueHandlerCookie(NULL)
{
}


VirtioQueue::~VirtioQueue()
{
}


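// Set up the queue's descriptor table, available ring and used ring in one
// physically contiguous allocation and tell the device where to find them.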
status_t
VirtioQueue::Init()
{
	fDev->fRegs->queueSel = fId;
	TRACE("queueNumMax: %" B_PRIu32 "\n", fDev->fRegs->queueNumMax);
	fQueueLen = fDev->fRegs->queueNumMax;
	fDev->fRegs->queueNum = fQueueLen;
	fLastUsed = 0;

	// First pass: record each structure's offset within the allocation.
	size_t queueMemSize = 0;
	fDescs = (VirtioDesc*)queueMemSize;
	queueMemSize += ROUNDUP(sizeof(VirtioDesc)
		* fQueueLen, B_PAGE_SIZE);

	fAvail = (VirtioAvail*)queueMemSize;
	queueMemSize += ROUNDUP(sizeof(VirtioAvail)
		+ sizeof(uint16) * fQueueLen, B_PAGE_SIZE);

	fUsed  = (VirtioUsed*)queueMemSize;
	queueMemSize += ROUNDUP(sizeof(VirtioUsed)
		+ sizeof(VirtioUsedItem)*fQueueLen, B_PAGE_SIZE);

	uint8* queueMem = NULL;
	fArea.SetTo(create_area("VirtIO Queue", (void**)&queueMem,
		B_ANY_KERNEL_ADDRESS, queueMemSize, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA));

	if (!fArea.IsSet()) {
		ERROR("can't create area: %08" B_PRIx32 "\n", fArea.Get());
		return fArea.Get();
	}

	physical_entry pe;
	status_t res = get_memory_map(queueMem, queueMemSize, &pe, 1);
	if (res < B_OK) {
		ERROR("get_memory_map failed\n");
		return res;
	}

	TRACE("queueMem: %p\n", queueMem);

	memset(queueMem, 0, queueMemSize);

	// Second pass: rebase the offsets onto the allocation's virtual and
	// physical addresses.
	fDescs = (VirtioDesc*)((uint8*)fDescs + (addr_t)queueMem);
	fAvail = (VirtioAvail*)((uint8*)fAvail + (addr_t)queueMem);
	fUsed  = (VirtioUsed*)((uint8*)fUsed  + (addr_t)queueMem);

	phys_addr_t descsPhys = (addr_t)fDescs - (addr_t)queueMem + pe.address;
	phys_addr_t availPhys = (addr_t)fAvail - (addr_t)queueMem + pe.address;
	phys_addr_t usedPhys  = (addr_t)fUsed  - (addr_t)queueMem + pe.address;

	if (fDev->fRegs->version != 1) {
		SetLowHi(fDev->fRegs->queueDescLow,  fDev->fRegs->queueDescHi,  descsPhys);
		SetLowHi(fDev->fRegs->queueAvailLow, fDev->fRegs->queueAvailHi, availPhys);
		SetLowHi(fDev->fRegs->queueUsedLow,  fDev->fRegs->queueUsedHi,  usedPhys);
	}

	fFreeDescs.SetTo(new(std::nothrow) uint32[(fQueueLen + 31) / 32]);
	if (!fFreeDescs.IsSet())
		return B_NO_MEMORY;

	memset(fFreeDescs.Get(), 0xff, sizeof(uint32) * ((fQueueLen + 31) / 32));
	fCookies.SetTo(new(std::nothrow) void*[fQueueLen]);
	if (!fCookies.IsSet())
		return B_NO_MEMORY;

	if (fDev->fRegs->version == 1) {
		uint32_t pfn = descsPhys / B_PAGE_SIZE;
		fDev->fRegs->queueAlign = B_PAGE_SIZE;
		fDev->fRegs->queuePfn = pfn;
	} else {
		fDev->fRegs->queueReady = 1;
	}

	return B_OK;
}


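// Scan the free-descriptor bitmap for a set bit, claim it and return its
// index, or -1 if the descriptor table is exhausted.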
int32
VirtioQueue::AllocDesc()
{
	for (size_t i = 0; i < fQueueLen; i++) {
		if ((fFreeDescs[i / 32] & ((uint32)1 << (i % 32))) != 0) {
			fFreeDescs[i / 32] &= ~((uint32)1 << (i % 32));
			return i;
		}
	}
	return -1;
}


void
VirtioQueue::FreeDesc(int32 idx)
{
	fFreeDescs[idx / 32] |= (uint32)1 << (idx % 32);
}


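// Chain one descriptor per vector element, mark the device-writable entries,
// publish the chain head on the available ring and notify the device.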
status_t
VirtioQueue::Enqueue(const physical_entry* vector,
	size_t readVectorCount, size_t writtenVectorCount,
	void* cookie)
{
	int32 firstDesc = -1, lastDesc = -1;
	size_t count = readVectorCount + writtenVectorCount;

	if (count == 0)
		return B_OK;

	for (size_t i = 0; i < count; i++) {
		int32 desc = AllocDesc();

		if (desc < 0) {
			ERROR("no free virtio descs, queue: %p\n", this);

			// Out of descriptors: release the partially built chain.
			if (firstDesc >= 0) {
				desc = firstDesc;
				while (kVringDescFlagsNext & fDescs[desc].flags) {
					int32_t nextDesc = fDescs[desc].next;
					FreeDesc(desc);
					desc = nextDesc;
				}
				FreeDesc(desc);
			}

			return B_WOULD_BLOCK;
		}

		if (i == 0) {
			firstDesc = desc;
		} else {
			fDescs[lastDesc].flags |= kVringDescFlagsNext;
			fDescs[lastDesc].next = desc;
		}
		fDescs[desc].addr = vector[i].address;
		fDescs[desc].len = vector[i].size;
		fDescs[desc].flags = 0;
		fDescs[desc].next = 0;
		// Vectors past the read part are written by the device.
		if (i >= readVectorCount)
			fDescs[desc].flags |= kVringDescFlagsWrite;

		lastDesc = desc;
	}

	int32_t idx = fAvail->idx % fQueueLen;
	fCookies[idx] = cookie;
	fAvail->ring[idx] = firstDesc;
	fAvail->idx++;
	fDev->fRegs->queueNotify = fId;

	return B_OK;
}


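// Pop the next completed request from the used ring, hand back its cookie and
// used length, and return the descriptor chain to the free bitmap.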
bool
VirtioQueue::Dequeue(void** _cookie, uint32* _usedLength)
{
	fDev->fRegs->queueSel = fId;

	if (fUsed->idx == fLastUsed)
		return false;

	if (_cookie != NULL)
		*_cookie = fCookies[fLastUsed % fQueueLen];
	fCookies[fLastUsed % fQueueLen] = NULL;

	if (_usedLength != NULL)
		*_usedLength = fUsed->ring[fLastUsed % fQueueLen].len;

	int32_t desc = fUsed->ring[fLastUsed % fQueueLen].id;
	while (kVringDescFlagsNext & fDescs[desc].flags) {
		int32_t nextDesc = fDescs[desc].next;
		FreeDesc(desc);
		desc = nextDesc;
	}
	FreeDesc(desc);
	fLastUsed++;

	return true;
}


// #pragma mark - VirtioIrqHandler


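// Reference-counted wrapper that installs the device's interrupt handler on
// the first reference and removes it when the last reference goes away.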
VirtioIrqHandler::VirtioIrqHandler(VirtioDevice* dev)
	:
	fDev(dev)
{
	fReferenceCount = 0;
}


void
VirtioIrqHandler::FirstReferenceAcquired()
{
	install_io_interrupt_handler(fDev->fIrq, Handle, fDev, 0);
}


void
VirtioIrqHandler::LastReferenceReleased()
{
	remove_io_interrupt_handler(fDev->fIrq, Handle, fDev);
}


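// Interrupt handler: dispatch queue completions and configuration-change
// notifications to the registered callbacks, then acknowledge the interrupt.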
int32
VirtioIrqHandler::Handle(void* data)
{
	// TRACE("VirtioIrqHandler::Handle(%p)\n", data);
	VirtioDevice* dev = (VirtioDevice*)data;

	if ((kVirtioIntQueue & dev->fRegs->interruptStatus) != 0) {
		for (int32 i = 0; i < dev->fQueueCnt; i++) {
			VirtioQueue* queue = dev->fQueues[i].Get();
			if (queue->fUsed->idx != queue->fLastUsed
				&& queue->fQueueHandler != NULL) {
				// Queue handlers receive the device's config-handler cookie
				// first, then the per-queue cookie.
				queue->fQueueHandler(dev->fConfigHandlerCookie,
					queue->fQueueHandlerCookie);
			}
		}
		dev->fRegs->interruptAck = kVirtioIntQueue;
	}

	if ((kVirtioIntConfig & dev->fRegs->interruptStatus) != 0) {
		if (dev->fConfigHandler != NULL)
			dev->fConfigHandler(dev->fConfigHandlerCookie);

		dev->fRegs->interruptAck = kVirtioIntConfig;
	}

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - VirtioDevice


VirtioDevice::VirtioDevice()
	:
	fRegs(NULL),
	fQueueCnt(0),
	fIrqHandler(this),
	fConfigHandler(NULL),
	fConfigHandlerCookie(NULL)
{
}


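// Map the device's MMIO register window, remember the interrupt line and
// reset the device by clearing its status register.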
status_t
VirtioDevice::Init(phys_addr_t regs, size_t regsLen, int32 irq, int32 queueCnt)
{
	fRegsArea.SetTo(map_physical_memory("Virtio MMIO",
		regs, regsLen, B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void**)&fRegs));

	if (!fRegsArea.IsSet())
		return fRegsArea.Get();

	fIrq = irq;

	// Reset
	fRegs->status = 0;

	return B_OK;
}
311