xref: /haiku/src/add-ons/kernel/bus_managers/virtio/VirtioQueue.cpp (revision 4b7e219688450694efc9d1890f83f816758c16d3)
1 /*
2  * Copyright 2013, Jérôme Duval, korli@users.berlios.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "VirtioPrivate.h"
8 
9 
10 static inline uint32
11 round_to_pagesize(uint32 size)
12 {
13 	return (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
14 }
15 
16 
17 area_id
18 alloc_mem(void **virt, phys_addr_t *phy, size_t size, uint32 protection,
19 	const char *name)
20 {
21 	physical_entry pe;
22 	void * virtadr;
23 	area_id areaid;
24 	status_t rv;
25 
26 	TRACE("allocating %ld bytes for %s\n", size, name);
27 
28 	size = round_to_pagesize(size);
29 	areaid = create_area(name, &virtadr, B_ANY_KERNEL_ADDRESS, size,
30 		B_CONTIGUOUS, protection);
31 	if (areaid < B_OK) {
32 		ERROR("couldn't allocate area %s\n", name);
33 		return B_ERROR;
34 	}
35 	rv = get_memory_map(virtadr, size, &pe, 1);
36 	if (rv < B_OK) {
37 		delete_area(areaid);
38 		ERROR("couldn't get mapping for %s\n", name);
39 		return B_ERROR;
40 	}
41 	if (virt)
42 		*virt = virtadr;
43 	if (phy)
44 		*phy = pe.address;
45 	TRACE("area = %" B_PRId32 ", size = %ld, virt = %p, phy = %#" B_PRIxPHYSADDR "\n",
46 		areaid, size, virtadr, pe.address);
47 	return areaid;
48 }
49 
50 
// Bookkeeping for one in-flight request: remembers the completion
// callback and the number of ring descriptors the request occupies,
// and (optionally) owns a table of indirect descriptors.
class TransferDescriptor {
public:
								TransferDescriptor(VirtioQueue* queue,
									uint16 indirectMaxSize);
								~TransferDescriptor();

			// B_OK, or the error from allocating the indirect table.
			status_t			InitCheck() { return fStatus; }

			// Invokes the registered completion callback, if any.
			void				Callback();
			// Number of ring descriptors the current request uses.
			uint16				Size() { return fDescriptorCount; }
			// Arms the descriptor for a request of `size` descriptors.
			void				SetTo(uint16 size,
									virtio_callback_func callback,
									void *callbackCookie);
			// Clears the request state set by SetTo().
			void				Unset();
			// Virtual/physical address of the indirect table (NULL/unset
			// when the queue does not use indirect descriptors).
			struct vring_desc*	Indirect() { return fIndirect; }
			phys_addr_t			PhysAddr() { return fPhysAddr; }
private:
			status_t			fStatus;
			VirtioQueue*		fQueue;
			void*				fCookie;
			virtio_callback_func fCallback;

			// Backing storage for the indirect descriptor table.
			struct vring_desc* 	fIndirect;
			size_t 				fAreaSize;
			area_id				fArea;
			phys_addr_t 		fPhysAddr;
			uint16				fDescriptorCount;
};
79 
80 
81 TransferDescriptor::TransferDescriptor(VirtioQueue* queue, uint16 indirectMaxSize)
82 	: fQueue(queue),
83 	fCookie(NULL),
84 	fCallback(NULL),
85 	fIndirect(NULL),
86 	fAreaSize(0),
87 	fArea(-1),
88 	fDescriptorCount(0)
89 {
90 	fStatus = B_OK;
91 	struct vring_desc* virtAddr;
92 	phys_addr_t physAddr;
93 
94 	if (indirectMaxSize > 0) {
95 		fAreaSize = indirectMaxSize * sizeof(struct vring_desc);
96 		fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
97 			"virtqueue");
98 		if (fArea < B_OK) {
99 			fStatus = fArea;
100 			return;
101 		}
102 		memset(virtAddr, 0, fAreaSize);
103 		fIndirect = virtAddr;
104 		fPhysAddr = physAddr;
105 
106 		for (uint16 i = 0; i < indirectMaxSize - 1; i++)
107 			fIndirect[i].next = i + 1;
108 		fIndirect[indirectMaxSize - 1].next = UINT16_MAX;
109 	}
110 }
111 
112 
113 TransferDescriptor::~TransferDescriptor()
114 {
115 	if (fArea > B_OK)
116 		delete_area(fArea);
117 }
118 
119 
120 void
121 TransferDescriptor::Callback()
122 {
123 	if (fCallback != NULL)
124 		fCallback(fQueue->Device()->DriverCookie(), fCookie);
125 }
126 
127 
128 void
129 TransferDescriptor::SetTo(uint16 size, virtio_callback_func callback,
130 	void *callbackCookie)
131 {
132 	fCookie = callbackCookie;
133 	fCallback = callback;
134 	fDescriptorCount = size;
135 }
136 
137 
138 void
139 TransferDescriptor::Unset()
140 {
141 	fCookie = NULL;
142 	fCallback = NULL;
143 	fDescriptorCount = 0;
144 }
145 
146 
147 //	#pragma mark -
148 
149 
150 VirtioQueue::VirtioQueue(VirtioDevice* device, uint16 queueNumber,
151 	uint16 ringSize)
152 	:
153 	fDevice(device),
154 	fQueueNumber(queueNumber),
155 	fRingSize(ringSize),
156 	fRingFree(ringSize),
157 	fRingHeadIndex(0),
158 	fRingUsedIndex(0),
159 	fStatus(B_OK),
160 	fIndirectMaxSize(0)
161 {
162 	fDescriptors = new(std::nothrow) TransferDescriptor*[fRingSize];
163 	if (fDescriptors == NULL) {
164 		fStatus = B_NO_MEMORY;
165 		return;
166 	}
167 
168 	uint8* virtAddr;
169 	phys_addr_t physAddr;
170 	fAreaSize = vring_size(fRingSize, device->Alignment());
171 	fArea = alloc_mem((void **)&virtAddr, &physAddr, fAreaSize, 0,
172 		"virtqueue");
173 	if (fArea < B_OK) {
174 		fStatus = fArea;
175 		return;
176 	}
177 	memset(virtAddr, 0, fAreaSize);
178 	vring_init(&fRing, fRingSize, virtAddr, device->Alignment());
179 
180 	for (uint16 i = 0; i < fRingSize - 1; i++)
181 		fRing.desc[i].next = i + 1;
182 	fRing.desc[fRingSize - 1].next = UINT16_MAX;
183 
184 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0)
185 		fIndirectMaxSize = 128;
186 
187 	for (uint16 i = 0; i < fRingSize; i++) {
188 		fDescriptors[i] = new TransferDescriptor(this, fIndirectMaxSize);
189 		if (fDescriptors[i] == NULL || fDescriptors[i]->InitCheck() != B_OK) {
190 			fStatus = B_NO_MEMORY;
191 			return;
192 		}
193 	}
194 
195 	DisableInterrupt();
196 
197 	device->SetupQueue(fQueueNumber, physAddr);
198 }
199 
200 
201 VirtioQueue::~VirtioQueue()
202 {
203 	delete_area(fArea);
204 	for (uint16 i = 0; i < fRingSize; i++) {
205 		delete fDescriptors[i];
206 	}
207 	delete[] fDescriptors;
208 }
209 
210 
// Would set VRING_AVAIL_F_NO_INTERRUPT to ask the device not to
// interrupt on used-ring updates; the flag manipulation is currently
// disabled, so this is a no-op.
void
VirtioQueue::DisableInterrupt()
{
	/*if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;*/
}
217 
218 
// Would clear VRING_AVAIL_F_NO_INTERRUPT to re-enable used-ring
// interrupts; the flag manipulation is currently disabled, so this is
// a no-op.
void
VirtioQueue::EnableInterrupt()
{
	/*if ((fDevice->Features() & VIRTIO_FEATURE_RING_EVENT_IDX) == 0)
		fRing.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;*/
}
225 
226 
// Kicks the device: notifies the host that new buffers have been made
// available in this queue.
void
VirtioQueue::NotifyHost()
{
	fDevice->NotifyQueue(fQueueNumber);
}
232 
233 
// Interrupt handler hook for this queue: drains every completion the
// device has posted to the used ring since the last run.
// DisableInterrupt()/EnableInterrupt() bracket the loop (both are
// currently no-ops).
status_t
VirtioQueue::Interrupt()
{
	CALLED();
	DisableInterrupt();

	// fRing.used->idx is written by the device; catch our shadow index
	// up to it, retiring one request per iteration.
	while (fRingUsedIndex != fRing.used->idx)
		Finish();

	EnableInterrupt();
	return B_OK;
}
246 
247 
// Retires the next completed request from the used ring: runs its
// completion callback, then returns its descriptor chain to the
// queue's free list.
void
VirtioQueue::Finish()
{
	TRACE("Finish() fRingUsedIndex: %u\n", fRingUsedIndex);

	// The used index increases monotonically; mask it to a ring slot.
	uint16 usedIndex = fRingUsedIndex++ & (fRingSize - 1);
	TRACE("Finish() usedIndex: %u\n", usedIndex);
	struct vring_used_elem *element = &fRing.used->ring[usedIndex];
	uint16 descriptorIndex = element->id;
		// uint32 length = element->len;

	fDescriptors[descriptorIndex]->Callback();
	// Credit the whole chain back before walking it; `size` then tracks
	// how many descriptors remain to be visited (the head is accounted
	// for by the decrement below).
	uint16 size = fDescriptors[descriptorIndex]->Size();
	fDescriptors[descriptorIndex]->Unset();
	fRingFree += size;
	size--;

	// Walk to the tail of the chain, counting links off `size`.
	uint16 index = descriptorIndex;
	while ((fRing.desc[index].flags & VRING_DESC_F_NEXT) != 0) {
		index = fRing.desc[index].next;
		size--;
	}

	// Recorded length and actual chain length disagree; since size is
	// unsigned, a too-long chain wraps around and also trips this.
	if (size > 0)
		panic("VirtioQueue::Finish() descriptors left %d\n", size);

	// Splice the chain onto the front of the free list.
	fRing.desc[index].next = fRingHeadIndex;
	fRingHeadIndex = descriptorIndex;
	TRACE("Finish() fRingHeadIndex: %u\n", fRingHeadIndex);
}
278 
279 
280 status_t
281 VirtioQueue::QueueRequest(const physical_entry* vector, size_t readVectorCount,
282 	size_t writtenVectorCount, virtio_callback_func callback,
283 	void *callbackCookie)
284 {
285 	CALLED();
286 	size_t count = readVectorCount + writtenVectorCount;
287 	if (count < 1)
288 		return B_BAD_VALUE;
289 	if ((fDevice->Features() & VIRTIO_FEATURE_RING_INDIRECT_DESC) != 0) {
290 		return QueueRequestIndirect(vector, readVectorCount,
291 			writtenVectorCount, callback, callbackCookie);
292 	}
293 
294 	if (count > fRingFree)
295 		return B_BUSY;
296 
297 	uint16 insertIndex = fRingHeadIndex;
298 	fDescriptors[insertIndex]->SetTo(count, callback,
299 		callbackCookie);
300 
301 	// enqueue
302 	uint16 index = QueueVector(insertIndex, fRing.desc, vector,
303 		readVectorCount, writtenVectorCount);
304 
305 	fRingHeadIndex = index;
306 	fRingFree -= count;
307 
308 	UpdateAvailable(insertIndex);
309 
310 	NotifyHost();
311 
312 	return B_OK;
313 }
314 
315 
316 status_t
317 VirtioQueue::QueueRequestIndirect(const physical_entry* vector,
318 	size_t readVectorCount,	size_t writtenVectorCount,
319 	virtio_callback_func callback, void *callbackCookie)
320 {
321 	CALLED();
322 	size_t count = readVectorCount + writtenVectorCount;
323 	if (count > fRingFree || count > fIndirectMaxSize)
324 		return B_BUSY;
325 
326 	uint16 insertIndex = fRingHeadIndex;
327 	fDescriptors[insertIndex]->SetTo(1, callback,
328 		callbackCookie);
329 
330 	// enqueue
331 	uint16 index = QueueVector(0, fDescriptors[insertIndex]->Indirect(),
332 		vector, readVectorCount, writtenVectorCount);
333 
334 	fRing.desc[insertIndex].addr = fDescriptors[insertIndex]->PhysAddr();
335 	fRing.desc[insertIndex].len = index * sizeof(struct vring_desc);
336 	fRing.desc[insertIndex].flags = VRING_DESC_F_INDIRECT;
337 	fRingHeadIndex = fRing.desc[insertIndex].next;
338 	fRingFree--;
339 
340 	UpdateAvailable(insertIndex);
341 
342 	NotifyHost();
343 
344 	return B_OK;
345 }
346 
347 
348 void
349 VirtioQueue::UpdateAvailable(uint16 index)
350 {
351 	CALLED();
352 	uint16 available = fRing.avail->idx & (fRingSize - 1);
353 	fRing.avail->ring[available] = index;
354 	fRing.avail->idx++;
355 }
356 
357 
358 uint16
359 VirtioQueue::QueueVector(uint16 insertIndex, struct vring_desc *desc,
360 	const physical_entry* vector, size_t readVectorCount,
361 	size_t writtenVectorCount)
362 {
363 	CALLED();
364 	uint16 index = insertIndex;
365 	size_t total = readVectorCount + writtenVectorCount;
366 	for (size_t i = 0; i < total; i++) {
367 		desc[index].addr = vector[i].address;
368 		desc[index].len =  vector[i].size;
369 		desc[index].flags = 0;
370 		if (i >= readVectorCount)
371 			desc[index].flags |= VRING_DESC_F_WRITE;
372 		if (i < total - 1)
373 			desc[index].flags |= VRING_DESC_F_NEXT;
374 		index = desc[index].next;
375 	}
376 
377 	return index;
378 }
379