xref: /haiku/src/add-ons/kernel/bus_managers/virtio/VirtioQueue.cpp (revision 220d04022750f40f8bac8f01fa551211e28d04f2)
1 /*
2  * Copyright 2013, Jérôme Duval, korli@users.berlios.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "VirtioPrivate.h"
8 
9 
10 static inline uint32
11 round_to_pagesize(uint32 size)
12 {
13 	return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
14 }
15 
16 
17 area_id
18 alloc_mem(void **virt, phys_addr_t *phy, size_t size, uint32 protection,
19 	const char *name)
20 {
21 	physical_entry pe;
22 	void * virtadr;
23 	area_id areaid;
24 	status_t rv;
25 
26 	TRACE("allocating %ld bytes for %s\n", size, name);
27 
28 	size = round_to_pagesize(size);
29 	areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
30 		B_CONTIGUOUS, protection);
31 	if (areaid < B_OK) {
32 		ERROR("couldn't allocate area %s\n", name);
33 		return B_ERROR;
34 	}
35 	rv = get_memory_map(virtadr, size, &pe, 1);
36 	if (rv < B_OK) {
37 		delete_area(areaid);
38 		ERROR("couldn't get mapping for %s\n", name);
39 		return B_ERROR;
40 	}
41 	if (virt)
42 		*virt = virtadr;
43 	if (phy)
44 		*phy = pe.address;
45 	TRACE("area = %" B_PRId32 ", size = %ld, virt = %p, phy = %#" B_PRIxPHYSADDR "\n",
46 		areaid, size, virtadr, pe.address);
47 	return areaid;
48 }
49 
50 
// Bookkeeping for one in-flight transfer on a virtqueue. Every ring slot owns
// one of these; it remembers the driver's completion callback and, when
// indirectMaxSize > 0 (VIRTIO_FEATURE_RING_INDIRECT_DESC negotiated), holds a
// preallocated physically contiguous table of indirect descriptors.
class TransferDescriptor {
public:
								TransferDescriptor(VirtioQueue* queue,
									uint16 indirectMaxSize);
								~TransferDescriptor();

			// B_OK, or the error from allocating the indirect table.
			status_t			InitCheck() { return fStatus; }

			// Invoke the driver callback set by SetTo(), if any.
			void				Callback();
			// Number of ring descriptors the current transfer occupies.
			uint16				Size() { return fDescriptorCount; }
			void				SetTo(uint16 size,
									virtio_callback_func callback,
									void *callbackCookie);
			void				Unset();
			// Virtual/physical address of the indirect descriptor table
			// (NULL/0 when indirect descriptors are not in use).
			struct vring_desc*	Indirect() { return fIndirect; }
			phys_addr_t			PhysAddr() { return fPhysAddr; }
private:
			status_t			fStatus;
			VirtioQueue*		fQueue;
			void*				fCookie;	// cookie passed to fCallback
			virtio_callback_func fCallback;

			struct vring_desc* 	fIndirect;	// indirect descriptor table
			size_t 				fAreaSize;	// size of the backing area
			area_id				fArea;		// area backing fIndirect
			phys_addr_t 		fPhysAddr;	// physical address of fIndirect
			uint16				fDescriptorCount;
};
79 
80 
81 TransferDescriptor::TransferDescriptor(VirtioQueue* queue, uint16 indirectMaxSize)
82 	: fQueue(queue),
83 	fCookie(NULL),
84 	fCallback(NULL),
85 	fIndirect(NULL),
86 	fAreaSize(0),
87 	fArea(-1),
88 	fPhysAddr(0),
89 	fDescriptorCount(0)
90 {
91 	fStatus = B_OK;
92 	struct vring_desc* virtAddr;
93 	phys_addr_t physAddr;
94 
95 	if (indirectMaxSize > 0) {
96 		fAreaSize = indirectMaxSize * sizeof(struct vring_desc);
97 		fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
98 			"virtqueue");
99 		if (fArea < B_OK) {
100 			fStatus = fArea;
101 			return;
102 		}
103 		memset(virtAddr, 0, fAreaSize);
104 		fIndirect = virtAddr;
105 		fPhysAddr = physAddr;
106 
107 		for (uint16 i = 0; i < indirectMaxSize - 1; i++)
108 			fIndirect[i].next = i + 1;
109 		fIndirect[indirectMaxSize - 1].next = UINT16_MAX;
110 	}
111 }
112 
113 
114 TransferDescriptor::~TransferDescriptor()
115 {
116 	if (fArea > B_OK)
117 		delete_area(fArea);
118 }
119 
120 
121 void
122 TransferDescriptor::Callback()
123 {
124 	if (fCallback != NULL)
125 		fCallback(fQueue->Device()->DriverCookie(), fCookie);
126 }
127 
128 
129 void
130 TransferDescriptor::SetTo(uint16 size, virtio_callback_func callback,
131 	void *callbackCookie)
132 {
133 	fCookie = callbackCookie;
134 	fCallback = callback;
135 	fDescriptorCount = size;
136 }
137 
138 
139 void
140 TransferDescriptor::Unset()
141 {
142 	fCookie = NULL;
143 	fCallback = NULL;
144 	fDescriptorCount = 0;
145 }
146 
147 
148 //	#pragma mark -
149 
150 
151 VirtioQueue::VirtioQueue(VirtioDevice* device, uint16 queueNumber,
152 	uint16 ringSize)
153 	:
154 	fDevice(device),
155 	fQueueNumber(queueNumber),
156 	fRingSize(ringSize),
157 	fRingFree(ringSize),
158 	fRingHeadIndex(0),
159 	fRingUsedIndex(0),
160 	fStatus(B_OK),
161 	fIndirectMaxSize(0)
162 {
163 	fDescriptors = new(std::nothrow) TransferDescriptor*[fRingSize];
164 	if (fDescriptors == NULL) {
165 		fStatus = B_NO_MEMORY;
166 		return;
167 	}
168 
169 	uint8* virtAddr;
170 	phys_addr_t physAddr;
171 	fAreaSize = vring_size(fRingSize, device->Alignment());
172 	fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
173 		"virtqueue");
174 	if (fArea < B_OK) {
175 		fStatus = fArea;
176 		return;
177 	}
178 	memset(virtAddr, 0, fAreaSize);
179 	vring_init(&fRing, fRingSize, virtAddr, device->Alignment());
180 
181 	for (uint16 i = 0; i < fRingSize - 1; i++)
182 		fRing.desc[i].next = i + 1;
183 	fRing.desc[fRingSize - 1].next = UINT16_MAX;
184 
185 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0)
186 		fIndirectMaxSize = 128;
187 
188 	for (uint16 i = 0; i < fRingSize; i++) {
189 		fDescriptors[i] = new TransferDescriptor(this, fIndirectMaxSize);
190 		if (fDescriptors[i] == NULL || fDescriptors[i]->InitCheck() != B_OK) {
191 			fStatus = B_NO_MEMORY;
192 			return;
193 		}
194 	}
195 
196 	DisableInterrupt();
197 
198 	device->SetupQueue(fQueueNumber, physAddr);
199 }
200 
201 
202 VirtioQueue::~VirtioQueue()
203 {
204 	delete_area(fArea);
205 	for (uint16 i = 0; i < fRingSize; i++) {
206 		delete fDescriptors[i];
207 	}
208 	delete[] fDescriptors;
209 }
210 
211 
// Ask the device not to interrupt for this queue. The implementation is
// currently disabled (no-op); the commented-out code would set
// VRING_AVAIL_F_NO_INTERRUPT when the event-index feature is not in use.
void
VirtioQueue::DisableInterrupt()
{
	/*if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;*/
}
218 
219 
// Re-allow interrupts for this queue. Like DisableInterrupt() this is
// currently a no-op; the commented-out code would clear
// VRING_AVAIL_F_NO_INTERRUPT when the event-index feature is not in use.
void
VirtioQueue::EnableInterrupt()
{
	/*if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;*/
}
226 
227 
// Kick the device: tell it that new buffers were made available on this
// queue (delegates to the transport-specific notification).
void
VirtioQueue::NotifyHost()
{
	fDevice->NotifyQueue(fQueueNumber);
}
233 
234 
/*!	Interrupt handler for this queue: drains every entry the device has
	added to the used ring since the last run, retiring each transfer via
	Finish().
	NOTE(review): fRing.used->idx is written by the device; no explicit
	memory barrier is issued before reading it here — presumably interrupt
	entry provides sufficient ordering on supported platforms, verify.
*/
status_t
VirtioQueue::Interrupt()
{
	CALLED();
	// Suppress further notifications while draining (currently a no-op).
	DisableInterrupt();

	while (fRingUsedIndex != fRing.used->idx)
		Finish();

	EnableInterrupt();
	return B_OK;
}
247 
248 
/*!	Retires one completed transfer from the used ring: invokes its
	completion callback, returns its descriptor chain to the free list and
	updates the free-descriptor accounting.
*/
void
VirtioQueue::Finish()
{
	TRACE("Finish() fRingUsedIndex: %u\n", fRingUsedIndex);

	// The used index is free-running; masking maps it onto a ring slot
	// (fRingSize is expected to be a power of two).
	uint16 usedIndex = fRingUsedIndex++ & (fRingSize - 1);
	TRACE("Finish() usedIndex: %u\n", usedIndex);
	struct vring_used_elem *element = &fRing.used->ring[usedIndex];
	uint16 descriptorIndex = element->id;
		// uint32 length = element->len;

	fDescriptors[descriptorIndex]->Callback();
	// Size() is the number of ring descriptors this transfer occupied
	// (1 for an indirect transfer, see QueueRequestIndirect()).
	uint16 size = fDescriptors[descriptorIndex]->Size();
	fDescriptors[descriptorIndex]->Unset();
	fRingFree += size;
	size--;

	// Walk to the chain's tail, counting descriptors off; any remainder
	// means the recorded size disagrees with the actual chain length.
	uint16 index = descriptorIndex;
	while ((fRing.desc[index].flags & VRING_DESC_F_NEXT) != 0) {
		index = fRing.desc[index].next;
		size--;
	}

	if (size > 0)
		panic("VirtioQueue::Finish() descriptors left %d\n", size);

	// Splice the freed chain back onto the head of the free list.
	fRing.desc[index].next = fRingHeadIndex;
	fRingHeadIndex = descriptorIndex;
	TRACE("Finish() fRingHeadIndex: %u\n", fRingHeadIndex);
}
279 
280 
281 status_t
282 VirtioQueue::QueueRequest(const physical_entry* vector, size_t readVectorCount,
283 	size_t writtenVectorCount, virtio_callback_func callback,
284 	void *callbackCookie)
285 {
286 	CALLED();
287 	size_t count = readVectorCount + writtenVectorCount;
288 	if (count < 1)
289 		return B_BAD_VALUE;
290 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0) {
291 		return QueueRequestIndirect(vector, readVectorCount,
292 			writtenVectorCount, callback, callbackCookie);
293 	}
294 
295 	if (count > fRingFree)
296 		return B_BUSY;
297 
298 	uint16 insertIndex = fRingHeadIndex;
299 	fDescriptors[insertIndex]->SetTo(count, callback,
300 		callbackCookie);
301 
302 	// enqueue
303 	uint16 index = QueueVector(insertIndex, fRing.desc, vector,
304 		readVectorCount, writtenVectorCount);
305 
306 	fRingHeadIndex = index;
307 	fRingFree -= count;
308 
309 	UpdateAvailable(insertIndex);
310 
311 	NotifyHost();
312 
313 	return B_OK;
314 }
315 
316 
/*!	Queues a transfer using a single ring descriptor that points at the
	preallocated indirect descriptor table of the chosen slot
	(VIRTIO_FEATURE_RING_INDIRECT_DESC): all buffers are written into the
	table, and only one ring descriptor is consumed regardless of count.
	NOTE(review): no write barrier is issued between filling the indirect
	table/ring descriptor and publishing via UpdateAvailable() — verify
	ordering guarantees on weakly-ordered platforms.
*/
status_t
VirtioQueue::QueueRequestIndirect(const physical_entry* vector,
	size_t readVectorCount,	size_t writtenVectorCount,
	virtio_callback_func callback, void *callbackCookie)
{
	CALLED();
	size_t count = readVectorCount + writtenVectorCount;
	// Also reject requests exceeding the indirect table's capacity.
	if (count > fRingFree || count > fIndirectMaxSize)
		return B_BUSY;

	uint16 insertIndex = fRingHeadIndex;
	// Only one ring descriptor is used, so the transfer's Size() is 1.
	fDescriptors[insertIndex]->SetTo(1, callback,
		callbackCookie);

	// enqueue: fill the slot's indirect table starting at entry 0; the
	// returned index is the number of entries written (the table is
	// pre-chained 0 -> 1 -> 2 -> ...).
	uint16 index = QueueVector(0, fDescriptors[insertIndex]->Indirect(),
		vector, readVectorCount, writtenVectorCount);

	// Point the ring descriptor at the indirect table and mark it so.
	fRing.desc[insertIndex].addr = fDescriptors[insertIndex]->PhysAddr();
	fRing.desc[insertIndex].len = index * sizeof(struct vring_desc);
	fRing.desc[insertIndex].flags = VRING_DESC_F_INDIRECT;
	fRingHeadIndex = fRing.desc[insertIndex].next;
	fRingFree--;

	UpdateAvailable(insertIndex);

	NotifyHost();

	return B_OK;
}
347 
348 
/*!	Publishes the descriptor chain starting at \a index to the device by
	appending it to the available ring and bumping the free-running
	available index.
	NOTE(review): the device reads avail->ring/idx; no write barrier is
	issued between the ring-entry store and the index increment — verify
	ordering on weakly-ordered platforms.
*/
void
VirtioQueue::UpdateAvailable(uint16 index)
{
	CALLED();
	// avail->idx is free-running; mask it onto a ring slot.
	uint16 available = fRing.avail->idx & (fRingSize - 1);
	fRing.avail->ring[available] = index;
	fRing.avail->idx++;
}
357 
358 
359 uint16
360 VirtioQueue::QueueVector(uint16 insertIndex, struct vring_desc *desc,
361 	const physical_entry* vector, size_t readVectorCount,
362 	size_t writtenVectorCount)
363 {
364 	CALLED();
365 	uint16 index = insertIndex;
366 	size_t total = readVectorCount + writtenVectorCount;
367 	for (size_t i = 0; i < total; i++) {
368 		desc[index].addr = vector[i].address;
369 		desc[index].len =  vector[i].size;
370 		desc[index].flags = 0;
371 		if (i >= readVectorCount)
372 			desc[index].flags |= VRING_DESC_F_WRITE;
373 		if (i < total - 1)
374 			desc[index].flags |= VRING_DESC_F_NEXT;
375 		index = desc[index].next;
376 	}
377 
378 	return index;
379 }
380