xref: /haiku/src/add-ons/kernel/bus_managers/virtio/VirtioQueue.cpp (revision 99158cceddacb52ebe30708c4e6a27b4fb711c8f)
1 /*
2  * Copyright 2013, 2018, Jérôme Duval, jerome.duval@gmail.com.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "VirtioPrivate.h"
8 
9 
10 static inline uint32
11 round_to_pagesize(uint32 size)
12 {
13 	return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
14 }
15 
16 
17 area_id
18 alloc_mem(void **virt, phys_addr_t *phy, size_t size, uint32 protection,
19 	const char *name)
20 {
21 	physical_entry pe;
22 	void * virtadr;
23 	area_id areaid;
24 	status_t rv;
25 
26 	TRACE("allocating %ld bytes for %s\n", size, name);
27 
28 	size = round_to_pagesize(size);
29 	areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
30 		B_CONTIGUOUS, protection);
31 	if (areaid < B_OK) {
32 		ERROR("couldn't allocate area %s\n", name);
33 		return B_ERROR;
34 	}
35 	rv = get_memory_map(virtadr, size, &pe, 1);
36 	if (rv < B_OK) {
37 		delete_area(areaid);
38 		ERROR("couldn't get mapping for %s\n", name);
39 		return B_ERROR;
40 	}
41 	if (virt)
42 		*virt = virtadr;
43 	if (phy)
44 		*phy = pe.address;
45 	TRACE("area = %" B_PRId32 ", size = %ld, virt = %p, phy = %#" B_PRIxPHYSADDR "\n",
46 		areaid, size, virtadr, pe.address);
47 	return areaid;
48 }
49 
50 
// Bookkeeping for one in-flight request on a VirtioQueue: remembers the
// driver's cookie and how many ring descriptors the request occupies.
// When indirect descriptors are in use (indirectMaxSize > 0), each
// instance also owns a physically contiguous table of vring_desc entries
// allocated in the constructor.
class TransferDescriptor {
public:
								TransferDescriptor(VirtioQueue* queue,
									uint16 indirectMaxSize);
								~TransferDescriptor();

			// B_OK, or the error from allocating the indirect table.
			status_t			InitCheck() { return fStatus; }

			// Number of ring descriptors occupied by the current request.
			uint16				Size() { return fDescriptorCount; }
			void				SetTo(uint16 size, void *cookie);
			void*				Cookie() { return fCookie; }
			void				Unset();
			// Virtual/physical address of the indirect descriptor table.
			struct vring_desc*	Indirect() { return fIndirect; }
			phys_addr_t			PhysAddr() { return fPhysAddr; }
private:
			status_t			fStatus;
			VirtioQueue*		fQueue;
			void*				fCookie;

			struct vring_desc* 	fIndirect;
			size_t 				fAreaSize;
			area_id				fArea;
			phys_addr_t 		fPhysAddr;
			uint16				fDescriptorCount;
};
76 
77 
78 TransferDescriptor::TransferDescriptor(VirtioQueue* queue, uint16 indirectMaxSize)
79 	: fQueue(queue),
80 	fCookie(NULL),
81 	fIndirect(NULL),
82 	fAreaSize(0),
83 	fArea(-1),
84 	fPhysAddr(0),
85 	fDescriptorCount(0)
86 {
87 	fStatus = B_OK;
88 	struct vring_desc* virtAddr;
89 	phys_addr_t physAddr;
90 
91 	if (indirectMaxSize > 0) {
92 		fAreaSize = indirectMaxSize * sizeof(struct vring_desc);
93 		fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
94 			"virtqueue");
95 		if (fArea < B_OK) {
96 			fStatus = fArea;
97 			return;
98 		}
99 		memset(virtAddr, 0, fAreaSize);
100 		fIndirect = virtAddr;
101 		fPhysAddr = physAddr;
102 
103 		for (uint16 i = 0; i < indirectMaxSize - 1; i++)
104 			fIndirect[i].next = i + 1;
105 		fIndirect[indirectMaxSize - 1].next = UINT16_MAX;
106 	}
107 }
108 
109 
110 TransferDescriptor::~TransferDescriptor()
111 {
112 	if (fArea > B_OK)
113 		delete_area(fArea);
114 }
115 
116 
117 void
118 TransferDescriptor::SetTo(uint16 size, void *cookie)
119 {
120 	fCookie = cookie;
121 	fDescriptorCount = size;
122 }
123 
124 
125 void
126 TransferDescriptor::Unset()
127 {
128 	fCookie = NULL;
129 	fDescriptorCount = 0;
130 }
131 
132 
133 //	#pragma mark -
134 
135 
136 VirtioQueue::VirtioQueue(VirtioDevice* device, uint16 queueNumber,
137 	uint16 ringSize)
138 	:
139 	fDevice(device),
140 	fQueueNumber(queueNumber),
141 	fRingSize(ringSize),
142 	fRingFree(ringSize),
143 	fRingHeadIndex(0),
144 	fRingUsedIndex(0),
145 	fStatus(B_OK),
146 	fIndirectMaxSize(0),
147 	fCallback(NULL),
148 	fCookie(NULL)
149 {
150 	fDescriptors = new(std::nothrow) TransferDescriptor*[fRingSize];
151 	if (fDescriptors == NULL) {
152 		fStatus = B_NO_MEMORY;
153 		return;
154 	}
155 
156 	uint8* virtAddr;
157 	phys_addr_t physAddr;
158 	fAreaSize = vring_size(fRingSize, device->Alignment());
159 	fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
160 		"virtqueue");
161 	if (fArea < B_OK) {
162 		fStatus = fArea;
163 		return;
164 	}
165 	memset(virtAddr, 0, fAreaSize);
166 	vring_init(&fRing, fRingSize, virtAddr, device->Alignment());
167 
168 	for (uint16 i = 0; i < fRingSize - 1; i++)
169 		fRing.desc[i].next = i + 1;
170 	fRing.desc[fRingSize - 1].next = UINT16_MAX;
171 
172 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0)
173 		fIndirectMaxSize = 128;
174 
175 	for (uint16 i = 0; i < fRingSize; i++) {
176 		fDescriptors[i] = new TransferDescriptor(this, fIndirectMaxSize);
177 		if (fDescriptors[i] == NULL || fDescriptors[i]->InitCheck() != B_OK) {
178 			fStatus = B_NO_MEMORY;
179 			return;
180 		}
181 	}
182 
183 	DisableInterrupt();
184 
185 	device->SetupQueue(fQueueNumber, physAddr);
186 }
187 
188 
189 VirtioQueue::~VirtioQueue()
190 {
191 	delete_area(fArea);
192 	for (uint16 i = 0; i < fRingSize; i++) {
193 		delete fDescriptors[i];
194 	}
195 	delete[] fDescriptors;
196 }
197 
198 
199 status_t
200 VirtioQueue::SetupInterrupt(virtio_callback_func handler, void *cookie)
201 {
202 	fCallback = handler;
203 	fCookie = cookie;
204 
205 	return B_OK;
206 }
207 
208 
209 
210 void
211 VirtioQueue::DisableInterrupt()
212 {
213 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
214 		fRing.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
215 }
216 
217 
218 void
219 VirtioQueue::EnableInterrupt()
220 {
221 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
222 		fRing.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
223 }
224 
225 
// Kick the device: tell the host that new buffers were published to this
// queue's available ring.
void
VirtioQueue::NotifyHost()
{
	fDevice->NotifyQueue(fQueueNumber);
}
231 
232 
233 status_t
234 VirtioQueue::Interrupt()
235 {
236 	CALLED();
237 
238 	DisableInterrupt();
239 
240 	if (fCallback != NULL)
241 		fCallback(Device()->DriverCookie(), fCookie);
242 
243 	EnableInterrupt();
244 	return B_OK;
245 }
246 
247 
// Pop the next completed request off the used ring.
// Returns the cookie that was passed to QueueRequest*(), or NULL if
// nothing has completed. Stores the request's descriptor count in *_size
// (if non-NULL) and pushes the freed descriptor chain back onto the free
// list.
void*
VirtioQueue::Dequeue(uint16 *_size)
{
	TRACE("Dequeue() fRingUsedIndex: %u\n", fRingUsedIndex);

	// Our private counter caught up with the device's index: nothing new.
	if (fRingUsedIndex == fRing.used->idx)
		return NULL;

	// Masking assumes fRingSize is a power of two.
	uint16 usedIndex = fRingUsedIndex++ & (fRingSize - 1);
	TRACE("Dequeue() usedIndex: %u\n", usedIndex);
	struct vring_used_elem *element = &fRing.used->ring[usedIndex];
	uint16 descriptorIndex = element->id;
		// uint32 length = element->len;

	void* cookie = fDescriptors[descriptorIndex]->Cookie();
	uint16 size = fDescriptors[descriptorIndex]->Size();
	if (_size != NULL)
		*_size = size;
	if (size == 0)
		panic("VirtioQueue::Dequeue() size is zero\n");
	fDescriptors[descriptorIndex]->Unset();
	fRingFree += size;
	size--;

	// Walk to the tail of the chain; an indirect request occupies a
	// single ring descriptor, so there is nothing to walk in that case.
	uint16 index = descriptorIndex;
	if ((fRing.desc[index].flags & VRING_DESC_F_INDIRECT) == 0) {
		while ((fRing.desc[index].flags & VRING_DESC_F_NEXT) != 0) {
			index = fRing.desc[index].next;
			size--;
		}
	}

	// The chain length must match the recorded descriptor count.
	if (size > 0)
		panic("VirtioQueue::Dequeue() descriptors left %d\n", size);

	// Splice the freed chain onto the front of the free list.
	fRing.desc[index].next = fRingHeadIndex;
	fRingHeadIndex = descriptorIndex;
	TRACE("Dequeue() fRingHeadIndex: %u\n", fRingHeadIndex);

	return cookie;
}
289 
290 
291 status_t
292 VirtioQueue::QueueRequest(const physical_entry* vector, size_t readVectorCount,
293 	size_t writtenVectorCount, void *cookie)
294 {
295 	CALLED();
296 	size_t count = readVectorCount + writtenVectorCount;
297 	if (count < 1)
298 		return B_BAD_VALUE;
299 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0) {
300 		return QueueRequestIndirect(vector, readVectorCount,
301 			writtenVectorCount, cookie);
302 	}
303 
304 	if (count > fRingFree)
305 		return B_BUSY;
306 
307 	uint16 insertIndex = fRingHeadIndex;
308 	fDescriptors[insertIndex]->SetTo(count, cookie);
309 
310 	// enqueue
311 	uint16 index = QueueVector(insertIndex, fRing.desc, vector,
312 		readVectorCount, writtenVectorCount);
313 
314 	fRingHeadIndex = index;
315 	fRingFree -= count;
316 
317 	UpdateAvailable(insertIndex);
318 
319 	NotifyHost();
320 
321 	return B_OK;
322 }
323 
324 
325 status_t
326 VirtioQueue::QueueRequestIndirect(const physical_entry* vector,
327 	size_t readVectorCount,	size_t writtenVectorCount,
328 	void *cookie)
329 {
330 	CALLED();
331 	size_t count = readVectorCount + writtenVectorCount;
332 	if (count > fRingFree || count > fIndirectMaxSize)
333 		return B_BUSY;
334 
335 	uint16 insertIndex = fRingHeadIndex;
336 	fDescriptors[insertIndex]->SetTo(1, cookie);
337 
338 	// enqueue
339 	uint16 index = QueueVector(0, fDescriptors[insertIndex]->Indirect(),
340 		vector, readVectorCount, writtenVectorCount);
341 
342 	fRing.desc[insertIndex].addr = fDescriptors[insertIndex]->PhysAddr();
343 	fRing.desc[insertIndex].len = index * sizeof(struct vring_desc);
344 	fRing.desc[insertIndex].flags = VRING_DESC_F_INDIRECT;
345 	fRingHeadIndex = fRing.desc[insertIndex].next;
346 	fRingFree--;
347 
348 	UpdateAvailable(insertIndex);
349 
350 	NotifyHost();
351 
352 	return B_OK;
353 }
354 
355 
// Append a descriptor chain head to the available ring and advance the
// ring's index so the host can pick it up.
void
VirtioQueue::UpdateAvailable(uint16 index)
{
	CALLED();
	// Masking assumes fRingSize is a power of two.
	uint16 available = fRing.avail->idx & (fRingSize - 1);
	fRing.avail->ring[available] = index;
	// NOTE(review): the ring entry must be visible to the device before
	// the index update; presumably the platform's memory model covers
	// this here — verify whether a write barrier is needed.
	fRing.avail->idx++;
}
364 
365 
366 uint16
367 VirtioQueue::QueueVector(uint16 insertIndex, struct vring_desc *desc,
368 	const physical_entry* vector, size_t readVectorCount,
369 	size_t writtenVectorCount)
370 {
371 	CALLED();
372 	uint16 index = insertIndex;
373 	size_t total = readVectorCount + writtenVectorCount;
374 	for (size_t i = 0; i < total; i++, index = desc[index].next) {
375 		desc[index].addr = vector[i].address;
376 		desc[index].len =  vector[i].size;
377 		desc[index].flags = 0;
378 		if (i < total - 1)
379 			desc[index].flags |= VRING_DESC_F_NEXT;
380 		if (i >= readVectorCount)
381 			desc[index].flags |= VRING_DESC_F_WRITE;
382 	}
383 
384 	return index;
385 }
386