/*
 * Copyright 2013, 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Distributed under the terms of the MIT License.
 */


#include "VirtioPrivate.h"


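// Round a byte count up to a whole number of pages.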
static inline uint32
round_to_pagesize(uint32 size)
{
	return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
}


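// Allocate physically contiguous memory suitable for device DMA and return
// the area id, reporting the virtual and physical base address through the
// optional out parameters.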
area_id
alloc_mem(void** virt, phys_addr_t* phy, size_t size, uint32 protection,
	const char* name)
{
	physical_entry pe;
	void* virtadr;
	area_id areaid;
	status_t rv;

	TRACE("allocating %" B_PRIuSIZE " bytes for %s\n", size, name);

	size = round_to_pagesize(size);
	areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
		B_CONTIGUOUS, protection);
	if (areaid < B_OK) {
		ERROR("couldn't allocate area %s\n", name);
		return areaid;
	}
	rv = get_memory_map(virtadr, size, &pe, 1);
	if (rv < B_OK) {
		delete_area(areaid);
		ERROR("couldn't get mapping for %s\n", name);
		return rv;
	}
	if (virt != NULL)
		*virt = virtadr;
	if (phy != NULL)
		*phy = pe.address;
	TRACE("area = %" B_PRId32 ", size = %" B_PRIuSIZE ", virt = %p, "
		"phy = %#" B_PRIxPHYSADDR "\n", areaid, size, virtadr, pe.address);
	return areaid;
}


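// A TransferDescriptor tracks one pending request: the driver cookie, the
// number of ring descriptors the request occupies and, when indirect
// descriptors are negotiated, a physically contiguous table holding the
// request's descriptor chain.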
class TransferDescriptor {
public:
								TransferDescriptor(VirtioQueue* queue,
									uint16 indirectMaxSize);
								~TransferDescriptor();

			status_t			InitCheck() { return fStatus; }

			uint16				Size() { return fDescriptorCount; }
			void				SetTo(uint16 size, void *cookie);
			void*				Cookie() { return fCookie; }
			void				Unset();
			struct vring_desc*	Indirect() { return fIndirect; }
			phys_addr_t			PhysAddr() { return fPhysAddr; }
private:
			status_t			fStatus;
			VirtioQueue*		fQueue;
			void*				fCookie;

			struct vring_desc*	fIndirect;
			size_t				fAreaSize;
			area_id				fArea;
			phys_addr_t			fPhysAddr;
			uint16				fDescriptorCount;
};


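// If indirect descriptors are in use, allocate the indirect table up front
// and chain its entries into a free list terminated by UINT16_MAX.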
TransferDescriptor::TransferDescriptor(VirtioQueue* queue,
	uint16 indirectMaxSize)
	:
	fQueue(queue),
	fCookie(NULL),
	fIndirect(NULL),
	fAreaSize(0),
	fArea(-1),
	fPhysAddr(0),
	fDescriptorCount(0)
{
	fStatus = B_OK;
	struct vring_desc* virtAddr;
	phys_addr_t physAddr;

	if (indirectMaxSize > 0) {
		fAreaSize = indirectMaxSize * sizeof(struct vring_desc);
		fArea = alloc_mem((void**)&virtAddr, &physAddr, fAreaSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, "virtqueue");
		if (fArea < B_OK) {
			fStatus = fArea;
			return;
		}
		memset(virtAddr, 0, fAreaSize);
		fIndirect = virtAddr;
		fPhysAddr = physAddr;

		for (uint16 i = 0; i < indirectMaxSize - 1; i++)
			fIndirect[i].next = i + 1;
		fIndirect[indirectMaxSize - 1].next = UINT16_MAX;
	}
}


TransferDescriptor::~TransferDescriptor()
{
	if (fArea > B_OK)
		delete_area(fArea);
}


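// SetTo() records the cookie and descriptor count of a freshly queued
// request; Unset() clears them again once the request has completed.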
void
TransferDescriptor::SetTo(uint16 size, void *cookie)
{
	fCookie = cookie;
	fDescriptorCount = size;
}


void
TransferDescriptor::Unset()
{
	fCookie = NULL;
	fDescriptorCount = 0;
}


//	#pragma mark -


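// Set up the virtqueue: allocate the physically contiguous ring memory,
// chain all descriptors into a free list and hand the ring's physical
// address to the device.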
VirtioQueue::VirtioQueue(VirtioDevice* device, uint16 queueNumber,
	uint16 ringSize)
	:
	fDevice(device),
	fQueueNumber(queueNumber),
	fRingSize(ringSize),
	fRingFree(ringSize),
	fRingHeadIndex(0),
	fRingUsedIndex(0),
	fStatus(B_OK),
	fIndirectMaxSize(0),
	fCallback(NULL),
	fCookie(NULL)
{
	fDescriptors = new(std::nothrow) TransferDescriptor*[fRingSize];
	if (fDescriptors == NULL) {
		fStatus = B_NO_MEMORY;
		return;
	}
	// NULL all slots first, so the destructor stays safe if we bail out
	// before every descriptor has been constructed.
	memset(fDescriptors, 0, sizeof(TransferDescriptor*) * fRingSize);

	uint8* virtAddr;
	phys_addr_t physAddr;
	fAreaSize = vring_size(fRingSize, device->Alignment());
	fArea = alloc_mem((void**)&virtAddr, &physAddr, fAreaSize,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, "virtqueue");
	if (fArea < B_OK) {
		fStatus = fArea;
		return;
	}
	memset(virtAddr, 0, fAreaSize);
	vring_init(&fRing, fRingSize, virtAddr, device->Alignment());

	for (uint16 i = 0; i < fRingSize - 1; i++)
		fRing.desc[i].next = i + 1;
	fRing.desc[fRingSize - 1].next = UINT16_MAX;

	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0)
		fIndirectMaxSize = 128;

	for (uint16 i = 0; i < fRingSize; i++) {
		fDescriptors[i] = new(std::nothrow) TransferDescriptor(this,
			fIndirectMaxSize);
		if (fDescriptors[i] == NULL || fDescriptors[i]->InitCheck() != B_OK) {
			fStatus = B_NO_MEMORY;
			return;
		}
	}

	DisableInterrupt();

	device->SetupQueue(fQueueNumber, physAddr);
}


VirtioQueue::~VirtioQueue()
{
	delete_area(fArea);
	if (fDescriptors != NULL) {
		for (uint16 i = 0; i < fRingSize; i++)
			delete fDescriptors[i];
		delete[] fDescriptors;
	}
}


status_t
VirtioQueue::SetupInterrupt(virtio_callback_func handler, void *cookie)
{
	fCallback = handler;
	fCookie = cookie;

	return B_OK;
}


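// VRING_AVAIL_F_NO_INTERRUPT is only a hint to the host, and the host
// ignores it entirely once VIRTIO_FEATURE_RING_EVENT_IDX has been
// negotiated, so the flag is only toggled without that feature.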
void
VirtioQueue::DisableInterrupt()
{
	if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}


void
VirtioQueue::EnableInterrupt()
{
	if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}


void
VirtioQueue::NotifyHost()
{
	fDevice->NotifyQueue(fQueueNumber);
}


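// Called from the device's interrupt handler: queue interrupts are masked
// while the driver callback runs, then unmasked again.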
status_t
VirtioQueue::Interrupt()
{
	CALLED();

	DisableInterrupt();

	if (fCallback != NULL)
		fCallback(Device()->DriverCookie(), fCookie);

	EnableInterrupt();
	return B_OK;
}


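// Pop one completed request off the used ring, hand back its cookie and
// used byte count, and return the request's descriptor chain to the free
// list.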
bool
VirtioQueue::Dequeue(void** _cookie, uint32* _usedLength)
{
	TRACE("Dequeue() fRingUsedIndex: %u\n", fRingUsedIndex);

	if (fRingUsedIndex == fRing.used->idx)
		return false;

	uint16 usedIndex = fRingUsedIndex++ & (fRingSize - 1);
	TRACE("Dequeue() usedIndex: %u\n", usedIndex);
	struct vring_used_elem *element = &fRing.used->ring[usedIndex];
	uint16 descriptorIndex = element->id;
	if (_usedLength != NULL)
		*_usedLength = element->len;

	void* cookie = fDescriptors[descriptorIndex]->Cookie();
	if (_cookie != NULL)
		*_cookie = cookie;

	uint16 size = fDescriptors[descriptorIndex]->Size();
	if (size == 0)
		panic("VirtioQueue::Dequeue() size is zero\n");
	fDescriptors[descriptorIndex]->Unset();
	fRingFree += size;
	size--;

	// Walk the chain to its tail descriptor; an indirect request occupies
	// only a single ring descriptor.
	uint16 index = descriptorIndex;
	if ((fRing.desc[index].flags & VRING_DESC_F_INDIRECT) == 0) {
		while ((fRing.desc[index].flags & VRING_DESC_F_NEXT) != 0) {
			index = fRing.desc[index].next;
			size--;
		}
	}

	if (size > 0)
		panic("VirtioQueue::Dequeue() descriptors left %d\n", size);

	// Put the freed chain back in front of the descriptor free list.
	fRing.desc[index].next = fRingHeadIndex;
	fRingHeadIndex = descriptorIndex;
	TRACE("Dequeue() fRingHeadIndex: %u\n", fRingHeadIndex);

	return true;
}


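// Queue a request described by a scatter/gather vector; the first
// readVectorCount entries are readable by the device, the remaining
// writtenVectorCount entries writable by it.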
status_t
VirtioQueue::QueueRequest(const physical_entry* vector, size_t readVectorCount,
	size_t writtenVectorCount, void *cookie)
{
	CALLED();
	size_t count = readVectorCount + writtenVectorCount;
	if (count < 1)
		return B_BAD_VALUE;
	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0) {
		return QueueRequestIndirect(vector, readVectorCount,
			writtenVectorCount, cookie);
	}

	if (count > fRingFree)
		return B_BUSY;

	uint16 insertIndex = fRingHeadIndex;
	fDescriptors[insertIndex]->SetTo(count, cookie);

	// enqueue
	uint16 index = QueueVector(insertIndex, fRing.desc, vector,
		readVectorCount, writtenVectorCount);

	fRingHeadIndex = index;
	fRingFree -= count;

	UpdateAvailable(insertIndex);

	NotifyHost();

	return B_OK;
}


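// Like QueueRequest(), but the whole chain is written into the request's
// pre-allocated indirect table, so it occupies only one ring descriptor.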
status_t
VirtioQueue::QueueRequestIndirect(const physical_entry* vector,
	size_t readVectorCount, size_t writtenVectorCount,
	void *cookie)
{
	CALLED();
	size_t count = readVectorCount + writtenVectorCount;
	if (count > fRingFree || count > fIndirectMaxSize)
		return B_BUSY;

	uint16 insertIndex = fRingHeadIndex;
	fDescriptors[insertIndex]->SetTo(1, cookie);

	// enqueue
	uint16 index = QueueVector(0, fDescriptors[insertIndex]->Indirect(),
		vector, readVectorCount, writtenVectorCount);

	fRing.desc[insertIndex].addr = fDescriptors[insertIndex]->PhysAddr();
	fRing.desc[insertIndex].len = index * sizeof(struct vring_desc);
	fRing.desc[insertIndex].flags = VRING_DESC_F_INDIRECT;
	fRingHeadIndex = fRing.desc[insertIndex].next;
	fRingFree--;

	UpdateAvailable(insertIndex);

	NotifyHost();

	return B_OK;
}


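// Publish the head of a descriptor chain in the available ring and advance
// the ring index so the host can pick the request up.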
void
VirtioQueue::UpdateAvailable(uint16 index)
{
	CALLED();
	uint16 available = fRing.avail->idx & (fRingSize - 1);
	fRing.avail->ring[available] = index;
	fRing.avail->idx++;
}


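// Fill descriptors along the free list with the scatter/gather entries,
// flagging all but the last with NEXT and the device-writable entries with
// WRITE; returns the first free index past the chain.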
uint16
VirtioQueue::QueueVector(uint16 insertIndex, struct vring_desc *desc,
	const physical_entry* vector, size_t readVectorCount,
	size_t writtenVectorCount)
{
	CALLED();
	uint16 index = insertIndex;
	size_t total = readVectorCount + writtenVectorCount;
	for (size_t i = 0; i < total; i++, index = desc[index].next) {
		desc[index].addr = vector[i].address;
		desc[index].len = vector[i].size;
		desc[index].flags = 0;
		if (i < total - 1)
			desc[index].flags |= VRING_DESC_F_NEXT;
		if (i >= readVectorCount)
			desc[index].flags |= VRING_DESC_F_WRITE;
	}

	return index;
}