xref: /haiku/src/add-ons/accelerants/intel_extreme/engine.cpp (revision 6f80a9801fedbe7355c4360bd204ba746ec3ec2d)
/*
 * Copyright 2006-2007, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 */


#include <Debug.h>

#include "accelerant.h"
#include "accelerant_protos.h"
#include "commands.h"


#undef TRACE
//#define TRACE_ENGINE
#ifdef TRACE_ENGINE
#	define TRACE(x...) _sPrintf("intel_extreme: " x)
#else
#	define TRACE(x...)
#endif

#define ERROR(x...) _sPrintf("intel_extreme: " x)
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)


static engine_token sEngineToken = {1, 0 /*B_2D_ACCELERATION*/, NULL};

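// QueueCommands batches commands into the hardware ring buffer: the
// constructor takes the ring buffer lock, and the destructor pads the
// stream to qword alignment, forces a memory flush, and publishes the
// new tail pointer before releasing the lock.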
QueueCommands::QueueCommands(ring_buffer &ring)
	:
	fRingBuffer(ring)
{
	acquire_lock(&fRingBuffer.lock);
}


QueueCommands::~QueueCommands()
{
	if (fRingBuffer.position & 0x07) {
		// make sure the command is properly aligned
		Write(COMMAND_NOOP);
	}

	// We must make sure memory is written back in case the ring buffer
	// is in write combining mode - releasing the lock does this, as the
	// buffer is flushed on a locked memory operation (which is what this
	// benaphore does), but it must happen before writing the new tail...
	int32 flush = 0;
	atomic_add(&flush, 1);

	write32(fRingBuffer.register_base + RING_BUFFER_TAIL, fRingBuffer.position);

	release_lock(&fRingBuffer.lock);
}

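// Copies a prepared command into the ring buffer; the size is given in
// bytes and must be a multiple of sizeof(uint32).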
void
QueueCommands::Put(struct command &command, size_t size)
{
	uint32 count = size / sizeof(uint32);
	uint32 *data = command.Data();

	MakeSpace(count);

	for (uint32 i = 0; i < count; i++) {
		Write(data[i]);
	}
}

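// Queues a flush command, padded with a NOOP to keep the stream qword
// aligned.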
void
QueueCommands::PutFlush()
{
	MakeSpace(2);

	Write(COMMAND_FLUSH);
	Write(COMMAND_NOOP);
}

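// Queues a wait-for-event command for the given event bits, padded with
// a NOOP for alignment.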
void
QueueCommands::PutWaitFor(uint32 event)
{
	MakeSpace(2);

	Write(COMMAND_WAIT_FOR_EVENT | event);
	Write(COMMAND_NOOP);
}

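// Queues an overlay flip; the second dword of the command carries the
// address of the overlay registers, plus a flag that makes the hardware
// reload the filter coefficients if requested.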
void
QueueCommands::PutOverlayFlip(uint32 mode, bool updateCoefficients)
{
	MakeSpace(2);

	Write(COMMAND_OVERLAY_FLIP | mode);

	uint32 registers;
	// G33 does not need a physical address for the overlay registers
	if (intel_uses_physical_overlay(*gInfo->shared_info))
		registers = gInfo->shared_info->physical_overlay_registers;
	else
		registers = gInfo->shared_info->overlay_offset;

	Write(registers | (updateCoefficients ? OVERLAY_UPDATE_COEFFICIENTS : 0));
}

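// Waits until at least "size" dwords are free in the ring buffer,
// reading back the hardware head pointer to pick up progress. Gives up
// after one second so that a hung engine cannot stall the accelerant
// forever.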
void
QueueCommands::MakeSpace(uint32 size)
{
	ASSERT((size & 1) == 0);

	size *= sizeof(uint32);
	bigtime_t start = system_time();

	while (fRingBuffer.space_left < size) {
		// wait until more space is free
		uint32 head = read32(fRingBuffer.register_base + RING_BUFFER_HEAD)
			& INTEL_RING_BUFFER_HEAD_MASK;

		if (head <= fRingBuffer.position)
			head += fRingBuffer.size;

		fRingBuffer.space_left = head - fRingBuffer.position;

		if (fRingBuffer.space_left < size) {
			if (system_time() > start + 1000000LL) {
				ERROR("engine stalled, head %" B_PRIx32 "\n", head);
				break;
			}
			spin(10);
		}
	}

	fRingBuffer.space_left -= size;
}

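// Writes one dword at the current position and advances it, wrapping
// around at the end of the buffer (which requires the buffer size to be
// a power of two).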
void
QueueCommands::Write(uint32 data)
{
	uint32 *target = (uint32 *)(fRingBuffer.base + fRingBuffer.position);
	*target = data;

	fRingBuffer.position = (fRingBuffer.position + sizeof(uint32))
		& (fRingBuffer.size - 1);
}


//	#pragma mark -

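// Disables the ring buffer and tears down its lock.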
void
uninit_ring_buffer(ring_buffer &ringBuffer)
{
	uninit_lock(&ringBuffer.lock);
	write32(ringBuffer.register_base + RING_BUFFER_CONTROL, 0);
}

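// Resets the software state and programs the hardware ring buffer
// registers; if the lock cannot be created, the ring buffer is disabled
// by setting its size to 0.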
void
setup_ring_buffer(ring_buffer &ringBuffer, const char* name)
{
	TRACE("Setup ring buffer %s, offset %" B_PRIx32 ", size %" B_PRIx32 "\n",
		name, ringBuffer.offset, ringBuffer.size);

	if (init_lock(&ringBuffer.lock, name) < B_OK) {
		// disable ring buffer
		ringBuffer.size = 0;
		return;
	}

	uint32 ring = ringBuffer.register_base;
	ringBuffer.position = 0;
	ringBuffer.space_left = ringBuffer.size;

	write32(ring + RING_BUFFER_TAIL, 0);
	write32(ring + RING_BUFFER_START, ringBuffer.offset);
	write32(ring + RING_BUFFER_CONTROL,
		((ringBuffer.size - B_PAGE_SIZE) & INTEL_RING_BUFFER_SIZE_MASK)
		| INTEL_RING_BUFFER_ENABLED);
}


//	#pragma mark - engine management

/*! Return number of hardware engines */
uint32
intel_accelerant_engine_count(void)
{
	CALLED();
	return 1;
}

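// There is only one engine, so acquiring it just takes the global engine
// lock; the capabilities and maxWait arguments are ignored.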
status_t
intel_acquire_engine(uint32 capabilities, uint32 maxWait, sync_token* syncToken,
	engine_token** _engineToken)
{
	CALLED();
	*_engineToken = &sEngineToken;

	if (acquire_lock(&gInfo->shared_info->engine_lock) != B_OK)
		return B_ERROR;

	if (syncToken)
		intel_sync_to_token(syncToken);

	return B_OK;
}

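// Fills in the caller's sync token, if one was passed, and releases the
// engine lock.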
status_t
intel_release_engine(engine_token* engineToken, sync_token* syncToken)
{
	CALLED();
	if (syncToken != NULL)
		syncToken->engine_id = engineToken->engine_id;

	release_lock(&gInfo->shared_info->engine_lock);
	return B_OK;
}

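// Queues a flush and busy-waits until the hardware head pointer has
// caught up with the tail, i.e. until all queued commands have been
// consumed.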
void
intel_wait_engine_idle(void)
{
	CALLED();

	// The acceleration engine is not yet functional on Skylake and later
	// ("Lake") and SoC hardware; it stalls, so don't touch it there.
	if (gInfo->shared_info->device_type.InFamily(INTEL_FAMILY_LAKE)
			|| gInfo->shared_info->device_type.InFamily(INTEL_FAMILY_SOC0)) {
		return;
	}

	{
		// the scope ensures that the QueueCommands destructor runs here,
		// submitting the flush to the hardware before we start polling
		QueueCommands queue(gInfo->shared_info->primary_ring_buffer);
		queue.PutFlush();
	}

	// TODO: this should only be a temporary solution!
	// A better way to do this would be to acquire the engine's lock and
	// sync to the latest token.

	bigtime_t start = system_time();

	ring_buffer &ring = gInfo->shared_info->primary_ring_buffer;
	uint32 head, tail;
	while (true) {
		head = read32(ring.register_base + RING_BUFFER_HEAD)
			& INTEL_RING_BUFFER_HEAD_MASK;
		tail = read32(ring.register_base + RING_BUFFER_TAIL)
			& INTEL_RING_BUFFER_HEAD_MASK;

		if (head == tail)
			break;

		if (system_time() > start + 1000000LL) {
			// the engine seems to be locked up!
			ERROR("engine locked up, head %" B_PRIx32 "!\n", head);
			break;
		}

		spin(10);
	}
}

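// Sync tokens carry no state yet; handing one out is a no-op, and syncing
// to one simply waits for the engine to go idle.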
status_t
intel_get_sync_token(engine_token* engineToken, sync_token* syncToken)
{
	CALLED();
	return B_OK;
}


status_t
intel_sync_to_token(sync_token* syncToken)
{
	CALLED();
	intel_wait_engine_idle();
	return B_OK;
}


//	#pragma mark - engine acceleration

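// Queues one source copy blit per request. The width and height in
// blit_params are apparently stored as the pixel count minus one, hence
// the +1 when computing the exclusive right and bottom edges.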
void
intel_screen_to_screen_blit(engine_token* token, blit_params* params,
	uint32 count)
{
	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	for (uint32 i = 0; i < count; i++) {
		xy_source_blit_command blit;
		blit.source_left = params[i].src_left;
		blit.source_top = params[i].src_top;
		blit.dest_left = params[i].dest_left;
		blit.dest_top = params[i].dest_top;
		blit.dest_right = params[i].dest_left + params[i].width + 1;
		blit.dest_bottom = params[i].dest_top + params[i].height + 1;

		queue.Put(blit, sizeof(blit));
	}
}

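// Fills the given rectangles with a solid color; the incoming coordinates
// are inclusive, so the exclusive right and bottom edges get +1.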
void
intel_fill_rectangle(engine_token* token, uint32 color,
	fill_rect_params* params, uint32 count)
{
	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	for (uint32 i = 0; i < count; i++) {
		xy_color_blit_command blit(false);
		blit.dest_left = params[i].left;
		blit.dest_top = params[i].top;
		blit.dest_right = params[i].right + 1;
		blit.dest_bottom = params[i].bottom + 1;
		blit.color = color;

		queue.Put(blit, sizeof(blit));
	}
}

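// Like intel_fill_rectangle(), but constructs the color blit command in
// inverting mode (the "true" constructor argument) so that the
// destination pixels are inverted rather than overwritten.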
void
intel_invert_rectangle(engine_token* token, fill_rect_params* params,
	uint32 count)
{
	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	for (uint32 i = 0; i < count; i++) {
		xy_color_blit_command blit(true);
		blit.dest_left = params[i].left;
		blit.dest_top = params[i].top;
		blit.dest_right = params[i].right + 1;
		blit.dest_bottom = params[i].bottom + 1;
		blit.color = 0xffffffff;

		queue.Put(blit, sizeof(blit));
	}
}

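// Spans arrive as packed uint16 triplets of top, left, right. A mono
// pattern setup command establishes the fill color once; afterwards one
// scanline blit is queued per span.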
void
intel_fill_span(engine_token* token, uint32 color, uint16* _params,
	uint32 count)
{
	struct params {
		uint16	top;
		uint16	left;
		uint16	right;
	} *params = (struct params*)_params;

	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	xy_setup_mono_pattern_command setup;
	setup.background_color = color;
	setup.pattern = 0;
	queue.Put(setup, sizeof(setup));

	for (uint32 i = 0; i < count; i++) {
		xy_scanline_blit_command blit;
		blit.dest_left = params[i].left;
		blit.dest_top = params[i].top;
		blit.dest_right = params[i].right;
		blit.dest_bottom = params[i].top;

		queue.Put(blit, sizeof(blit));
	}
}
377