xref: /haiku/src/add-ons/kernel/busses/virtio/virtio_mmio/VirtioDevice.cpp (revision e1c4049fed1047bdb957b0529e1921e97ef94770)
/*
 * Copyright 2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include "VirtioDevice.h"

#include <malloc.h>
#include <string.h>
#include <new>

#include <KernelExport.h>
#include <kernel.h>
#include <debug.h>


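// Store a 64-bit value into a pair of 32-bit MMIO registers (used for the
// 64-bit queue addresses of the modern interface).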
static inline void
SetLowHi(vuint32 &low, vuint32 &hi, uint64 val)
{
	low = (uint32)val;
	hi  = (uint32)(val >> 32);
}


// #pragma mark - VirtioQueue


VirtioQueue::VirtioQueue(VirtioDevice *dev, int32 id)
	:
	fDev(dev),
	fId(id),
	fAllocatedDescs(0),
	fQueueHandler(NULL),
	fQueueHandlerCookie(NULL)
{
}


VirtioQueue::~VirtioQueue()
{
}


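// Select the queue on the device, allocate physically contiguous memory for
// its descriptor table, available ring and used ring, and hand the addresses
// to the device (page frame number for the legacy interface, 64-bit addresses
// plus queueReady for the modern one).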
status_t
VirtioQueue::Init()
{
	fDev->fRegs->queueSel = fId;
	TRACE("queueNumMax: %" B_PRIu32 "\n", fDev->fRegs->queueNumMax);
	fQueueLen = fDev->fRegs->queueNumMax;
	fDescCount = fQueueLen;
	fDev->fRegs->queueNum = fQueueLen;
	fLastUsed = 0;

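	// Queue memory layout: the descriptor table, the available (driver to
	// device) ring and the used (device to driver) ring, each rounded up to
	// a page boundary.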
	size_t queueMemSize = 0;
	size_t descsOffset = queueMemSize;
	queueMemSize += ROUNDUP(sizeof(VirtioDesc) * fDescCount, B_PAGE_SIZE);

	size_t availOffset = queueMemSize;
	queueMemSize += ROUNDUP(sizeof(VirtioAvail) + sizeof(uint16) * fQueueLen, B_PAGE_SIZE);

	size_t usedOffset = queueMemSize;
	queueMemSize += ROUNDUP(sizeof(VirtioUsed) + sizeof(VirtioUsedItem) * fQueueLen, B_PAGE_SIZE);

	uint8* queueMem = NULL;
	fArea.SetTo(create_area("VirtIO Queue", (void**)&queueMem,
		B_ANY_KERNEL_ADDRESS, queueMemSize, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA));

	if (!fArea.IsSet()) {
		ERROR("can't create area: %08" B_PRIx32 "\n", fArea.Get());
		return fArea.Get();
	}

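	// The area is B_CONTIGUOUS, so a single physical_entry describes the
	// whole allocation and yields its physical base address.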
	physical_entry pe;
	status_t res = get_memory_map(queueMem, queueMemSize, &pe, 1);
	if (res < B_OK) {
		ERROR("get_memory_map failed\n");
		return res;
	}

	TRACE("queueMem: %p\n", queueMem);

	memset(queueMem, 0, queueMemSize);

	fDescs = (VirtioDesc*) (queueMem + descsOffset);
	fAvail = (VirtioAvail*)(queueMem + availOffset);
	fUsed  = (VirtioUsed*) (queueMem + usedOffset);

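	// The modern (version >= 2) interface takes the full 64-bit physical
	// address of each structure, split across low/high register pairs.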
	if (fDev->fRegs->version >= 2) {
		phys_addr_t descsPhys = (addr_t)fDescs - (addr_t)queueMem + pe.address;
		phys_addr_t availPhys = (addr_t)fAvail - (addr_t)queueMem + pe.address;
		phys_addr_t usedPhys  = (addr_t)fUsed  - (addr_t)queueMem + pe.address;

		SetLowHi(fDev->fRegs->queueDescLow,  fDev->fRegs->queueDescHi,  descsPhys);
		SetLowHi(fDev->fRegs->queueAvailLow, fDev->fRegs->queueAvailHi, availPhys);
		SetLowHi(fDev->fRegs->queueUsedLow,  fDev->fRegs->queueUsedHi,  usedPhys);
	}

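	// Bookkeeping for requests in flight: a bitmap of allocated descriptors
	// and the caller's cookie for the head of each descriptor chain.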
	res = fAllocatedDescs.Resize(fDescCount);
	if (res < B_OK)
		return res;

	fCookies.SetTo(new(std::nothrow) void*[fDescCount]);
	if (!fCookies.IsSet())
		return B_NO_MEMORY;

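	// The legacy (version 1) interface is only told the page frame number of
	// the queue memory; the modern interface is enabled via queueReady.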
	if (fDev->fRegs->version == 1) {
		uint32_t pfn = pe.address / B_PAGE_SIZE;
		fDev->fRegs->queueAlign = B_PAGE_SIZE;
		fDev->fRegs->queuePfn = pfn;
	} else {
		fDev->fRegs->queueReady = 1;
	}

	return B_OK;
}


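// Free descriptor slots are tracked in a bitmap; AllocDesc() hands out the
// lowest free index into the descriptor table and FreeDesc() reclaims it.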
int32
VirtioQueue::AllocDesc()
{
	int32 idx = fAllocatedDescs.GetLowestClear();
	if (idx < 0)
		return -1;

	fAllocatedDescs.Set(idx);
	return idx;
}


void
VirtioQueue::FreeDesc(int32 idx)
{
	fAllocatedDescs.Clear(idx);
}


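// Build a descriptor chain for one request: the first readVectorCount entries
// are device-readable, the remaining writtenVectorCount entries are marked
// device-writable. The chain is then published on the available ring and the
// device is notified.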
status_t
VirtioQueue::Enqueue(const physical_entry* vector,
	size_t readVectorCount, size_t writtenVectorCount,
	void* cookie)
{
	int32 firstDesc = -1, lastDesc = -1;
	size_t count = readVectorCount + writtenVectorCount;

	if (count == 0)
		return B_OK;

	for (size_t i = 0; i < count; i++) {
		int32 desc = AllocDesc();

		if (desc < 0) {
			ERROR("no free virtio descs, queue: %p\n", this);

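			// Out of descriptors: roll back the partially built chain before
			// bailing out, so the caller can retry later.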
			if (firstDesc >= 0) {
				desc = firstDesc;
				while (kVringDescFlagsNext & fDescs[desc].flags) {
					int32_t nextDesc = fDescs[desc].next;
					FreeDesc(desc);
					desc = nextDesc;
				}
				FreeDesc(desc);
			}

			return B_WOULD_BLOCK;
		}

		if (i == 0) {
			firstDesc = desc;
		} else {
			fDescs[lastDesc].flags |= kVringDescFlagsNext;
			fDescs[lastDesc].next = desc;
		}
		fDescs[desc].addr = vector[i].address;
		fDescs[desc].len = vector[i].size;
		fDescs[desc].flags = 0;
		fDescs[desc].next = 0;
		if (i >= readVectorCount)
			fDescs[desc].flags |= kVringDescFlagsWrite;

		lastDesc = desc;
	}

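	// Publish the head of the chain on the available ring, remember the
	// cookie for completion, and kick the device.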
	int32_t idx = fAvail->idx & (fQueueLen - 1);
	fCookies[firstDesc] = cookie;
	fAvail->ring[idx] = firstDesc;
	fAvail->idx++;
	fDev->fRegs->queueNotify = fId;

	return B_OK;
}


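// Check the used ring for a completed request. If one is present, hand its
// cookie and used length back to the caller and free its descriptor chain.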
bool
VirtioQueue::Dequeue(void** _cookie, uint32* _usedLength)
{
	fDev->fRegs->queueSel = fId;

	if (fUsed->idx == fLastUsed)
		return false;

	int32_t desc = fUsed->ring[fLastUsed & (fQueueLen - 1)].id;

	if (_cookie != NULL)
		*_cookie = fCookies[desc];
	fCookies[desc] = NULL;

	if (_usedLength != NULL)
		*_usedLength = fUsed->ring[fLastUsed & (fQueueLen - 1)].len;

	while (kVringDescFlagsNext & fDescs[desc].flags) {
		int32_t nextDesc = fDescs[desc].next;
		FreeDesc(desc);
		desc = nextDesc;
	}
	FreeDesc(desc);
	fLastUsed++;

	return true;
}


// #pragma mark - VirtioIrqHandler


VirtioIrqHandler::VirtioIrqHandler(VirtioDevice* dev)
	:
	fDev(dev)
{
	fReferenceCount = 0;
}


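// The device's interrupt handler is installed when the first reference to the
// handler object is acquired and removed again when the last one is released.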
void
VirtioIrqHandler::FirstReferenceAcquired()
{
	install_io_interrupt_handler(fDev->fIrq, Handle, fDev, 0);
}


void
VirtioIrqHandler::LastReferenceReleased()
{
	remove_io_interrupt_handler(fDev->fIrq, Handle, fDev);
}


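// Shared interrupt handler for the device: dispatch used-ring activity to the
// per-queue handlers and configuration changes to the config handler, and
// acknowledge each interrupt source that was serviced.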
int32
VirtioIrqHandler::Handle(void* data)
{
	// TRACE("VirtioIrqHandler::Handle(%p)\n", data);
	VirtioDevice* dev = (VirtioDevice*)data;

	if ((kVirtioIntQueue & dev->fRegs->interruptStatus) != 0) {
		for (int32 i = 0; i < dev->fQueueCnt; i++) {
			VirtioQueue* queue = dev->fQueues[i].Get();
			if (queue->fUsed->idx != queue->fLastUsed
				&& queue->fQueueHandler != NULL) {
				queue->fQueueHandler(dev->fConfigHandlerCookie,
					queue->fQueueHandlerCookie);
			}
		}
		dev->fRegs->interruptAck = kVirtioIntQueue;
	}

	if ((kVirtioIntConfig & dev->fRegs->interruptStatus) != 0) {
		if (dev->fConfigHandler != NULL)
			dev->fConfigHandler(dev->fConfigHandlerCookie);

		dev->fRegs->interruptAck = kVirtioIntConfig;
	}

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - VirtioDevice


VirtioDevice::VirtioDevice()
	:
	fRegs(NULL),
	fQueueCnt(0),
	fIrqHandler(this),
	fConfigHandler(NULL),
	fConfigHandlerCookie(NULL)
{
}


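// Map the device's MMIO register block and reset the device. Queues and the
// interrupt handler are set up separately (see VirtioQueue::Init() and
// VirtioIrqHandler).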
status_t
VirtioDevice::Init(phys_addr_t regs, size_t regsLen, int32 irq, int32 queueCnt)
{
	fRegsArea.SetTo(map_physical_memory("Virtio MMIO",
		regs, regsLen, B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void**)&fRegs));

	if (!fRegsArea.IsSet())
		return fRegsArea.Get();

	fIrq = irq;

	// Reset
	fRegs->status = 0;

	return B_OK;
}
310