1 /*
2 * Copyright 2021, Haiku, Inc. All rights reserved.
3 * Distributed under the terms of the MIT License.
4 */
5
6
7 #include "VirtioDevice.h"
8
9 #include <malloc.h>
10 #include <string.h>
11 #include <new>
12
13 #include <KernelExport.h>
14 #include <kernel.h>
15 #include <debug.h>
16
17
18 static inline void
SetLowHi(vuint32 & low,vuint32 & hi,uint64 val)19 SetLowHi(vuint32 &low, vuint32 &hi, uint64 val)
20 {
21 low = (uint32)val;
22 hi = (uint32)(val >> 32);
23 }
24
25
26 // #pragma mark - VirtioQueue
27
28
// Construct a queue object bound to device `dev` with virtio queue index
// `id`. No hardware is touched here; all device-side setup happens in
// Init().
VirtioQueue::VirtioQueue(VirtioDevice *dev, int32 id)
	:
	fDev(dev),
	fId(id),
	fAllocatedDescs(0),	// descriptor bitmap; sized later in Init()
	fQueueHandler(NULL),
	fQueueHandlerCookie(NULL)
{
}
38
39
// Queue memory (fArea) and the cookie array (fCookies) are released by
// their RAII holders; nothing to do explicitly.
VirtioQueue::~VirtioQueue()
{
}
43
44
45 status_t
Init(uint16 requestedSize)46 VirtioQueue::Init(uint16 requestedSize)
47 {
48 fDev->fRegs->queueSel = fId;
49 TRACE("queueNumMax: %d\n", fDev->fRegs->queueNumMax);
50 fQueueLen = fDev->fRegs->queueNumMax;
51 if (requestedSize != 0 && requestedSize > fQueueLen)
52 return B_BUFFER_OVERFLOW;
53
54 fDescCount = fQueueLen;
55 fDev->fRegs->queueNum = fQueueLen;
56 fLastUsed = 0;
57
58 size_t queueMemSize = 0;
59 size_t descsOffset = queueMemSize;
60 queueMemSize += ROUNDUP(sizeof(VirtioDesc) * fDescCount, B_PAGE_SIZE);
61
62 size_t availOffset = queueMemSize;
63 queueMemSize += ROUNDUP(sizeof(VirtioAvail) + sizeof(uint16) * fQueueLen, B_PAGE_SIZE);
64
65 size_t usedOffset = queueMemSize;
66 queueMemSize += ROUNDUP(sizeof(VirtioUsed) + sizeof(VirtioUsedItem) * fQueueLen, B_PAGE_SIZE);
67
68 uint8* queueMem = NULL;
69 fArea.SetTo(create_area("VirtIO Queue", (void**)&queueMem,
70 B_ANY_KERNEL_ADDRESS, queueMemSize, B_CONTIGUOUS,
71 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA));
72
73 if (!fArea.IsSet()) {
74 ERROR("can't create area: %08" B_PRIx32, fArea.Get());
75 return fArea.Get();
76 }
77
78 physical_entry pe;
79 status_t res = get_memory_map(queueMem, queueMemSize, &pe, 1);
80 if (res < B_OK) {
81 ERROR("get_memory_map failed");
82 return res;
83 }
84
85 TRACE("queueMem: %p\n", queueMem);
86
87 memset(queueMem, 0, queueMemSize);
88
89 fDescs = (VirtioDesc*) (queueMem + descsOffset);
90 fAvail = (VirtioAvail*)(queueMem + availOffset);
91 fUsed = (VirtioUsed*) (queueMem + usedOffset);
92
93 if (fDev->fRegs->version >= 2) {
94 phys_addr_t descsPhys = (addr_t)fDescs - (addr_t)queueMem + pe.address;
95 phys_addr_t availPhys = (addr_t)fAvail - (addr_t)queueMem + pe.address;
96 phys_addr_t usedPhys = (addr_t)fUsed - (addr_t)queueMem + pe.address;
97
98 SetLowHi(fDev->fRegs->queueDescLow, fDev->fRegs->queueDescHi, descsPhys);
99 SetLowHi(fDev->fRegs->queueAvailLow, fDev->fRegs->queueAvailHi, availPhys);
100 SetLowHi(fDev->fRegs->queueUsedLow, fDev->fRegs->queueUsedHi, usedPhys);
101 }
102
103 res = fAllocatedDescs.Resize(fDescCount);
104 if (res < B_OK)
105 return res;
106
107 fCookies.SetTo(new(std::nothrow) void*[fDescCount]);
108 if (!fCookies.IsSet())
109 return B_NO_MEMORY;
110
111 if (fDev->fRegs->version == 1) {
112 uint32_t pfn = pe.address / B_PAGE_SIZE;
113 fDev->fRegs->queueAlign = B_PAGE_SIZE;
114 fDev->fRegs->queuePfn = pfn;
115 } else {
116 fDev->fRegs->queueReady = 1;
117 }
118
119 return B_OK;
120 }
121
122
123 int32
AllocDesc()124 VirtioQueue::AllocDesc()
125 {
126 int32 idx = fAllocatedDescs.GetLowestClear();
127 if (idx < 0)
128 return -1;
129
130 fAllocatedDescs.Set(idx);
131 return idx;
132 }
133
134
// Return descriptor slot `idx` to the free pool.
void
VirtioQueue::FreeDesc(int32 idx)
{
	fAllocatedDescs.Clear(idx);
}
140
141
// Post one request to the queue as a chain of descriptors: the first
// `readVectorCount` entries of `vector` are device-readable, the remaining
// `writtenVectorCount` are device-writable. `cookie` is returned by
// Dequeue() when the device completes the request.
//
// Returns B_OK on success (also for an empty request), B_WOULD_BLOCK when
// not enough free descriptors are available (nothing is enqueued in that
// case).
status_t
VirtioQueue::Enqueue(const physical_entry* vector,
	size_t readVectorCount, size_t writtenVectorCount,
	void* cookie)
{
	int32 firstDesc = -1, lastDesc = -1;
	size_t count = readVectorCount + writtenVectorCount;

	if (count == 0)
		return B_OK;

	for (size_t i = 0; i < count; i++) {
		int32 desc = AllocDesc();

		if (desc < 0) {
			ERROR("no free virtio descs, queue: %p\n", this);

			// Roll back: walk the partially built chain from its head and
			// free every descriptor allocated so far.
			if (firstDesc >= 0) {
				desc = firstDesc;
				while (kVringDescFlagsNext & fDescs[desc].flags) {
					int32_t nextDesc = fDescs[desc].next;
					FreeDesc(desc);
					desc = nextDesc;
				}
				FreeDesc(desc);
			}

			return B_WOULD_BLOCK;
		}

		if (i == 0) {
			firstDesc = desc;
		} else {
			// Link the previous descriptor to this one.
			fDescs[lastDesc].flags |= kVringDescFlagsNext;
			fDescs[lastDesc].next = desc;
		}
		fDescs[desc].addr = vector[i].address;
		fDescs[desc].len = vector[i].size;
		fDescs[desc].flags = 0;
		fDescs[desc].next = 0;
		// Entries past the read section are device-writable.
		if (i >= readVectorCount)
			fDescs[desc].flags |= kVringDescFlagsWrite;

		lastDesc = desc;
	}

	// Publish the chain head in the avail ring. fQueueLen is a power of
	// two (required by virtio), so the mask wraps the free-running index.
	int32_t idx = fAvail->idx & (fQueueLen - 1);
	fCookies[firstDesc] = cookie;
	fAvail->ring[idx] = firstDesc;
	// Incrementing idx makes the entry visible to the device; the notify
	// register write then kicks it.
	fAvail->idx++;
	fDev->fRegs->queueNotify = fId;

	return B_OK;
}
196
197
// Pop one completed request from the used ring. On success stores the
// cookie passed to Enqueue() in `_cookie` and the number of bytes the
// device wrote in `_usedLength` (either may be NULL), frees the request's
// descriptor chain, and returns true. Returns false when no completion is
// pending.
bool
VirtioQueue::Dequeue(void** _cookie, uint32* _usedLength)
{
	fDev->fRegs->queueSel = fId;

	// fLastUsed is a free-running counter mirroring the device's
	// fUsed->idx; equal values mean the ring is drained.
	if (fUsed->idx == fLastUsed)
		return false;

	int32_t desc = fUsed->ring[fLastUsed & (fQueueLen - 1)].id;

	if (_cookie != NULL)
		*_cookie = fCookies[desc];
	fCookies[desc] = NULL;

	if (_usedLength != NULL)
		*_usedLength = fUsed->ring[fLastUsed & (fQueueLen - 1)].len;

	// Free the whole descriptor chain starting at the head the device
	// reported.
	while (kVringDescFlagsNext & fDescs[desc].flags) {
		int32_t nextDesc = fDescs[desc].next;
		FreeDesc(desc);
		desc = nextDesc;
	}
	FreeDesc(desc);
	fLastUsed++;

	return true;
}
225
226
227 // #pragma mark - VirtioIrqHandler
228
229
// Reference-counted interrupt hook for `dev`; the actual handler is only
// installed while at least one reference is held.
VirtioIrqHandler::VirtioIrqHandler(VirtioDevice* dev)
	:
	fDev(dev)
{
	// Start with zero references so the first AcquireReference() triggers
	// FirstReferenceAcquired(). fReferenceCount is inherited — presumably
	// from BReferenceable, which starts at 1; TODO(review) confirm.
	fReferenceCount = 0;
}
236
237
// Called when the reference count goes 0 -> 1: hook the device's interrupt
// line up to the static Handle() dispatcher.
// NOTE(review): the return value of install_io_interrupt_handler() is
// ignored; a failure here would leave the device silent.
void
VirtioIrqHandler::FirstReferenceAcquired()
{
	install_io_interrupt_handler(fDev->fIrq, Handle, fDev, 0);
}
243
244
// Called when the last reference is dropped: detach Handle() from the
// device's interrupt line again.
void
VirtioIrqHandler::LastReferenceReleased()
{
	remove_io_interrupt_handler(fDev->fIrq, Handle, fDev);
}
250
251
// Interrupt service routine for a virtio device. `data` is the
// VirtioDevice registered via install_io_interrupt_handler().
int32
VirtioIrqHandler::Handle(void* data)
{
	// TRACE("VirtioIrqHandler::Handle(%p)\n", data);
	VirtioDevice* dev = (VirtioDevice*)data;

	if ((kVirtioIntQueue & dev->fRegs->interruptStatus) != 0) {
		// The MMIO interrupt status does not say which queue fired, so
		// scan all queues for unconsumed used-ring entries.
		for (int32 i = 0; i < dev->fQueueCnt; i++) {
			VirtioQueue* queue = dev->fQueues[i].Get();
			if (queue->fUsed->idx != queue->fLastUsed
				&& queue->fQueueHandler != NULL) {
				// First argument is the driver-wide cookie, second the
				// per-queue cookie.
				queue->fQueueHandler(dev->fConfigHandlerCookie,
					queue->fQueueHandlerCookie);
			}
		}
		// Acknowledge only after the handlers ran.
		dev->fRegs->interruptAck = kVirtioIntQueue;
	}

	if ((kVirtioIntConfig & dev->fRegs->interruptStatus) != 0) {
		if (dev->fConfigHandler != NULL)
			dev->fConfigHandler(dev->fConfigHandlerCookie);

		dev->fRegs->interruptAck = kVirtioIntConfig;
	}

	return B_HANDLED_INTERRUPT;
}
279
280
281 // #pragma mark - VirtioDevice
282
283
// Construct an unmapped device object; fRegs stays NULL until Init() maps
// the MMIO register window.
VirtioDevice::VirtioDevice()
	:
	fRegs(NULL),
	fQueueCnt(0),
	fIrqHandler(this),
	fConfigHandler(NULL),
	fConfigHandlerCookie(NULL)
{
}
293
294
// Map the device's MMIO register window at physical address `regs`
// (length `regsLen`), remember the interrupt line `irq`, and reset the
// device. Returns B_OK, or the area-creation error on mapping failure.
// NOTE(review): `queueCnt` is not used in this function — presumably
// consumed elsewhere during setup; confirm against the caller.
status_t
VirtioDevice::Init(phys_addr_t regs, size_t regsLen, int32 irq, int32 queueCnt)
{
	fRegsArea.SetTo(map_physical_memory("Virtio MMIO",
		regs, regsLen, B_ANY_KERNEL_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void**)&fRegs));

	if (!fRegsArea.IsSet())
		return fRegsArea.Get();

	fIrq = irq;

	// Reset
	// Writing 0 to the status register resets the device per the virtio
	// spec.
	fRegs->status = 0;

	return B_OK;
}
313