#include "gfx_util.h"

#include <strings.h>
#include <stdio.h>

#include "CpuCapabilities.h"
#include "gfx_conv_c.h"
#include "gfx_conv_mmx.h"


// ref docs
// http://www.joemaller.com/fcp/fxscript_yuv_color.shtml


#if DEBUG
#define TRACE(a...) printf(a)
#else
#define TRACE(a...)
#endif


//! Tries to find the best conversion function between the ffmpeg-side pixel
// format and the Media Kit color space.
gfx_convert_func
resolve_colorspace(color_space colorSpace, PixelFormat pixelFormat, int width,
	int height)
{
	CPUCapabilities cpu;

	switch (colorSpace) {
		case B_RGB32:
			// Planar formats
			if (pixelFormat == PIX_FMT_YUV410P) {
				TRACE("resolve_colorspace: gfx_conv_yuv410p_rgb32_c\n");
				return gfx_conv_yuv410p_rgb32_c;
			}

			if (pixelFormat == PIX_FMT_YUV411P) {
				TRACE("resolve_colorspace: gfx_conv_yuv411p_rgb32_c\n");
				return gfx_conv_yuv411p_rgb32_c;
			}

			if (pixelFormat == PIX_FMT_YUV420P
				|| pixelFormat == PIX_FMT_YUVJ420P) {
#ifndef __x86_64__
				if (cpu.HasSSSE3() && width % 8 == 0 && height % 2 == 0) {
					TRACE("resolve_colorspace: gfx_conv_yuv420p_rgba32_ssse3\n");
					return gfx_conv_yuv420p_rgba32_ssse3;
				} else if (cpu.HasSSE2() && width % 8 == 0 && height % 2 == 0) {
					TRACE("resolve_colorspace: gfx_conv_yuv420p_rgba32_sse2\n");
					return gfx_conv_yuv420p_rgba32_sse2;
				} else if (cpu.HasSSE1() && width % 4 == 0
					&& height % 2 == 0) {
					TRACE("resolve_colorspace: gfx_conv_yuv420p_rgba32_sse\n");
					return gfx_conv_yuv420p_rgba32_sse;
				}
#endif
				TRACE("resolve_colorspace: gfx_conv_YCbCr420p_RGB32_c\n");
				return gfx_conv_YCbCr420p_RGB32_c;
			}

			if (pixelFormat == PIX_FMT_YUV422P
				|| pixelFormat == PIX_FMT_YUVJ422P) {
#ifndef __x86_64__
				if (cpu.HasSSSE3() && width % 8 == 0) {
					TRACE("resolve_colorspace: gfx_conv_yuv422p_rgba32_ssse3\n");
					return gfx_conv_yuv422p_rgba32_ssse3;
				} else if (cpu.HasSSE2() && width % 8 == 0) {
					TRACE("resolve_colorspace: gfx_conv_yuv422p_rgba32_sse2\n");
					return gfx_conv_yuv422p_rgba32_sse2;
				} else if (cpu.HasSSE1() && width % 4 == 0) {
					TRACE("resolve_colorspace: gfx_conv_yuv422p_rgba32_sse\n");
					return gfx_conv_yuv422p_rgba32_sse;
				}
#endif
				TRACE("resolve_colorspace: gfx_conv_YCbCr422_RGB32_c\n");
				return gfx_conv_YCbCr422_RGB32_c;
			}

			// Packed formats
			if (pixelFormat == PIX_FMT_YUYV422) {
#ifndef __x86_64__
				if (cpu.HasSSSE3() && width % 8 == 0) {
					return gfx_conv_yuv422_rgba32_ssse3;
				} else if (cpu.HasSSE2() && width % 8 == 0) {
					return gfx_conv_yuv422_rgba32_sse2;
				} else if (cpu.HasSSE1() && width % 4 == 0
					&& height % 2 == 0) {
					return gfx_conv_yuv422_rgba32_sse;
				}
#endif
				return gfx_conv_YCbCr422_RGB32_c;
			}

			TRACE("resolve_colorspace: %s => B_RGB32: NULL\n",
				pixfmt_to_string(pixelFormat));
			return NULL;

		case B_RGB24_BIG:
			TRACE("resolve_colorspace: %s => B_RGB24_BIG: NULL\n",
				pixfmt_to_string(pixelFormat));
			return NULL;

		case B_RGB24:
			TRACE("resolve_colorspace: %s => B_RGB24: NULL\n",
				pixfmt_to_string(pixelFormat));
			return NULL;

		case B_YCbCr422:
			if (pixelFormat == PIX_FMT_YUV410P) {
				TRACE("resolve_colorspace: gfx_conv_yuv410p_ycbcr422_c\n");
				return gfx_conv_yuv410p_ycbcr422_c;
			}

			if (pixelFormat == PIX_FMT_YUV411P) {
				TRACE("resolve_colorspace: gfx_conv_yuv411p_ycbcr422_c\n");
				return gfx_conv_yuv411p_ycbcr422_c;
			}

			if (pixelFormat == PIX_FMT_YUV420P
				|| pixelFormat == PIX_FMT_YUVJ420P) {
				TRACE("resolve_colorspace: gfx_conv_yuv420p_ycbcr422_c\n");
				return gfx_conv_yuv420p_ycbcr422_c;
			}

			if (pixelFormat == PIX_FMT_YUYV422) {
				TRACE("resolve_colorspace: PIX_FMT_YUYV422 => B_YCbCr422: "
					"gfx_conv_null\n");
				return gfx_conv_null;
			}

			TRACE("resolve_colorspace: %s => B_YCbCr422: NULL\n",
				pixfmt_to_string(pixelFormat));
			return NULL;

		default:
			TRACE("resolve_colorspace: default: NULL!!!\n");
			return NULL;
	}
}


const char*
pixfmt_to_string(int pixFormat)
{
	switch (pixFormat) {
		case PIX_FMT_NONE:
			return "PIX_FMT_NONE";

		case PIX_FMT_YUV420P:
			// planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
			return "PIX_FMT_YUV420P";

		case PIX_FMT_YUYV422:
			// packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
			return "PIX_FMT_YUYV422";

		case PIX_FMT_RGB24:
			// packed RGB 8:8:8, 24bpp, RGBRGB...
			return "PIX_FMT_RGB24";

		case PIX_FMT_BGR24:
			// packed RGB 8:8:8, 24bpp, BGRBGR...
			return "PIX_FMT_BGR24";

		case PIX_FMT_YUV422P:
			// planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
			return "PIX_FMT_YUV422P";

		case PIX_FMT_YUV444P:
			// planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
			return "PIX_FMT_YUV444P";

		case PIX_FMT_RGB32:
			// packed RGB 8:8:8, 32bpp, (msb)8A 8R 8G 8B(lsb), in CPU
			// endianness
			return "PIX_FMT_RGB32";

		case PIX_FMT_YUV410P:
			// planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
			return "PIX_FMT_YUV410P";

		case PIX_FMT_YUV411P:
			// planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
			return "PIX_FMT_YUV411P";

		case PIX_FMT_RGB565:
			// packed RGB 5:6:5, 16bpp, (msb)5R 6G 5B(lsb), in CPU endianness
			return "PIX_FMT_RGB565";

		case PIX_FMT_RGB555:
			// packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), in CPU
			// endianness, most significant bit to 0
			return "PIX_FMT_RGB555";

		case PIX_FMT_GRAY8:
			// Y, 8bpp
			return "PIX_FMT_GRAY8";

		case PIX_FMT_MONOWHITE:
			// Y, 1bpp, 0 is white, 1 is black
			return "PIX_FMT_MONOWHITE";

		case PIX_FMT_MONOBLACK:
			// Y, 1bpp, 0 is black, 1 is white
			return "PIX_FMT_MONOBLACK";

		case PIX_FMT_PAL8:
			// 8 bit with PIX_FMT_RGB32 palette
			return "PIX_FMT_PAL8";

		case PIX_FMT_YUVJ420P:
			// planar YUV 4:2:0, 12bpp, full scale (JPEG)
			return "PIX_FMT_YUVJ420P - YUV420P (Jpeg)";

		case PIX_FMT_YUVJ422P:
			// planar YUV 4:2:2, 16bpp, full scale (JPEG)
			return "PIX_FMT_YUVJ422P - YUV422P (Jpeg)";

		case PIX_FMT_YUVJ444P:
			// planar YUV 4:4:4, 24bpp, full scale (JPEG)
			return "PIX_FMT_YUVJ444P";

		case PIX_FMT_XVMC_MPEG2_MC:
			// XVideo Motion Acceleration via common packet passing
			return "PIX_FMT_XVMC_MPEG2_MC";

		case PIX_FMT_XVMC_MPEG2_IDCT:
			return "PIX_FMT_XVMC_MPEG2_IDCT";

		case PIX_FMT_UYVY422:
			// packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
			return "PIX_FMT_UYVY422";

		case PIX_FMT_UYYVYY411:
			// packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
			return "PIX_FMT_UYYVYY411";

		case PIX_FMT_BGR32:
			// packed RGB 8:8:8, 32bpp, (msb)8A 8B 8G 8R(lsb), in CPU
			// endianness
			return "PIX_FMT_BGR32";

		case PIX_FMT_BGR565:
			// packed RGB 5:6:5, 16bpp, (msb)5B 6G 5R(lsb), in CPU endianness
			return "PIX_FMT_BGR565";

		case PIX_FMT_BGR555:
			// packed RGB 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), in CPU
			// endianness, most significant bit to 1
			return "PIX_FMT_BGR555";

		case PIX_FMT_BGR8:
			// packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
			return "PIX_FMT_BGR8";

		case PIX_FMT_BGR4:
			// packed RGB 1:2:1, 4bpp, (msb)1B 2G 1R(lsb)
			return "PIX_FMT_BGR4";

		case PIX_FMT_BGR4_BYTE:
			// packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
			return "PIX_FMT_BGR4_BYTE";

		case PIX_FMT_RGB8:
			// packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
			return "PIX_FMT_RGB8";

		case PIX_FMT_RGB4:
			// packed RGB 1:2:1, 4bpp, (msb)1R 2G 1B(lsb)
			return "PIX_FMT_RGB4";

		case PIX_FMT_RGB4_BYTE:
			// packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
			return "PIX_FMT_RGB4_BYTE";

		case PIX_FMT_NV12:
			// planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 for UV
			return "PIX_FMT_NV12";

		case PIX_FMT_NV21:
			// as above, but U and V bytes are swapped
			return "PIX_FMT_NV21";

		case PIX_FMT_RGB32_1:
			// packed RGB 8:8:8, 32bpp, (msb)8R 8G 8B 8A(lsb), in CPU
			// endianness
			return "PIX_FMT_RGB32_1";

		case PIX_FMT_BGR32_1:
			// packed RGB 8:8:8, 32bpp, (msb)8B 8G 8R 8A(lsb), in CPU
			// endianness
			return "PIX_FMT_BGR32_1";

		case PIX_FMT_GRAY16BE:
			// Y, 16bpp, big-endian
			return "PIX_FMT_GRAY16BE";

		case PIX_FMT_GRAY16LE:
			// Y, 16bpp, little-endian
			return "PIX_FMT_GRAY16LE";

		case PIX_FMT_YUV440P:
			// planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
			return "PIX_FMT_YUV440P";

		case PIX_FMT_YUVJ440P:
			// planar YUV 4:4:0 full scale (JPEG)
			return "PIX_FMT_YUVJ440P - YUV440P (Jpeg)";

		case PIX_FMT_YUVA420P:
			// planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A
			// samples)
			return "PIX_FMT_YUVA420P - YUV420P (Alpha)";

		case PIX_FMT_VDPAU_H264:
			// H.264 HW decoding with VDPAU, data[0] contains a
			// vdpau_render_state struct which contains the bitstream of the
			// slices as well as various fields extracted from headers
			return "PIX_FMT_VDPAU_H264";

		case PIX_FMT_VDPAU_MPEG1:
			// MPEG-1 HW decoding with VDPAU, data[0] contains a
			// vdpau_render_state struct which contains the bitstream of the
			// slices as well as various fields extracted from headers
			return "PIX_FMT_VDPAU_MPEG1";

		case PIX_FMT_VDPAU_MPEG2:
			// MPEG-2 HW decoding with VDPAU, data[0] contains a
			// vdpau_render_state struct which contains the bitstream of the
			// slices as well as various fields extracted from headers
			return "PIX_FMT_VDPAU_MPEG2";

		case PIX_FMT_VDPAU_WMV3:
			// WMV3 HW decoding with VDPAU, data[0] contains a
			// vdpau_render_state struct which contains the bitstream of the
			// slices as well as various fields extracted from headers
			return "PIX_FMT_VDPAU_WMV3";

		case PIX_FMT_VDPAU_VC1:
			// VC-1 HW decoding with VDPAU, data[0] contains a
			// vdpau_render_state struct which contains the bitstream of the
			// slices as well as various fields extracted from headers
			return "PIX_FMT_VDPAU_VC1";

		case PIX_FMT_RGB48BE:
			// packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, big-endian
			return "PIX_FMT_RGB48BE";

		case PIX_FMT_RGB48LE:
			// packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, little-endian
			return "PIX_FMT_RGB48LE";

		case PIX_FMT_VAAPI_MOCO:
			// HW acceleration through VA API at motion compensation
			// entry-point, Picture.data[0] contains a vaapi_render_state
			// struct which contains macroblocks as well as various fields
			// extracted from headers
			return "PIX_FMT_VAAPI_MOCO";

		case PIX_FMT_VAAPI_IDCT:
			// HW acceleration through VA API at IDCT entry-point,
			// Picture.data[0] contains a vaapi_render_state struct which
			// contains fields extracted from headers
			return "PIX_FMT_VAAPI_IDCT";

		case PIX_FMT_VAAPI_VLD:
			// HW decoding through VA API, Picture.data[0] contains a
			// vaapi_render_state struct which contains the bitstream of the
			// slices as well as various fields extracted from headers
			return "PIX_FMT_VAAPI_VLD";

		default:
			return "(unknown)";
	}
}


color_space
pixfmt_to_colorspace(int pixFormat)
{
	switch (pixFormat) {
		default:
			TRACE("No BE API colorspace definition for pixel format "
				"\"%s\".\n", pixfmt_to_string(pixFormat));
			// Supposed to fall through.
		case PIX_FMT_NONE:
			return B_NO_COLOR_SPACE;

		// NOTE: See pixfmt_to_string() for what these formats are.
		case PIX_FMT_YUV420P:
			return B_YUV420;
		case PIX_FMT_YUYV422:
			return B_YUV422;
		case PIX_FMT_RGB24:
			return B_RGB24_BIG;
		case PIX_FMT_BGR24:
			return B_RGB24;
		case PIX_FMT_YUV422P:
			return B_YUV422;
		case PIX_FMT_YUV444P:
			return B_YUV444;
		case PIX_FMT_RGB32:
			return B_RGBA32_BIG;
		case PIX_FMT_YUV410P:
			return B_YUV9;
		case PIX_FMT_YUV411P:
			return B_YUV12;
		case PIX_FMT_RGB565:
			return B_RGB16_BIG;
		case PIX_FMT_RGB555:
			return B_RGB15_BIG;
		case PIX_FMT_GRAY8:
			return B_GRAY8;
		case PIX_FMT_MONOBLACK:
			return B_GRAY1;
		case PIX_FMT_PAL8:
			return B_CMAP8;
		case PIX_FMT_BGR32:
			return B_RGB32;
		case PIX_FMT_BGR565:
			return B_RGB16;
		case PIX_FMT_BGR555:
			return B_RGB15;
	}
}


PixelFormat
colorspace_to_pixfmt(color_space format)
{
	switch (format) {
		default:
		case B_NO_COLOR_SPACE:
			return PIX_FMT_NONE;

		// NOTE: See pixfmt_to_string() for what these formats are.
		case B_YUV420:
			return PIX_FMT_YUV420P;
		case B_YUV422:
			return PIX_FMT_YUV422P;
		case B_RGB24_BIG:
			return PIX_FMT_RGB24;
		case B_RGB24:
			return PIX_FMT_BGR24;
		case B_YUV444:
			return PIX_FMT_YUV444P;
		case B_RGBA32_BIG:
		case B_RGB32_BIG:
			return PIX_FMT_BGR32;
		case B_YUV9:
			return PIX_FMT_YUV410P;
		case B_YUV12:
			return PIX_FMT_YUV411P;
		// TODO: YCbCr color spaces! These are not the same as YUV!
		case B_RGB16_BIG:
			return PIX_FMT_RGB565;
		case B_RGB15_BIG:
			return PIX_FMT_RGB555;
		case B_GRAY8:
			return PIX_FMT_GRAY8;
		case B_GRAY1:
			return PIX_FMT_MONOBLACK;
		case B_CMAP8:
			return PIX_FMT_PAL8;
		case B_RGBA32:
		case B_RGB32:
			return PIX_FMT_RGB32;
		case B_RGB16:
			return PIX_FMT_BGR565;
		case B_RGB15:
			return PIX_FMT_BGR555;
	}
}


#define BEGIN_TAG "\033[31m"
#define END_TAG "\033[0m"

void
dump_ffframe(AVFrame* frame, const char* name)
{
	const char* picttypes[] = {"no pict type", "intra", "predicted",
		"bidir pre", "s(gmc)-vop"};
	printf(BEGIN_TAG"AVFrame(%s) pts:%-10lld cnum:%-5d dnum:%-5d %s%s]\n"END_TAG,
		name,
		frame->pts,
		frame->coded_picture_number,
		frame->display_picture_number,
//		frame->quality,
		frame->key_frame ? "keyframe, " : "",
		picttypes[frame->pict_type]);
//	printf(BEGIN_TAG"\t\tlinesize[] = {%ld, %ld, %ld, %ld}\n"END_TAG,
//		frame->linesize[0], frame->linesize[1], frame->linesize[2],
//		frame->linesize[3]);
}
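/*
	Usage sketch (illustrative only, not compiled): how a decoder might tie
	these helpers together. The variables colorSpace, pixelFormat, width,
	height, bitmapBits, bytesPerRow and decodedFrame are hypothetical, and the
	gfx_convert_func signature (AVFrame* in, AVFrame* out, int width,
	int height) is assumed from gfx_util.h.

	// pixelFormat typically comes from the codec context (pix_fmt), while
	// colorSpace is what the downstream Media Kit consumer asked for.
	gfx_convert_func convert = resolve_colorspace(colorSpace, pixelFormat,
		width, height);
	if (convert == NULL) {
		// No converter exists for this pairing; the caller would fall back
		// to another color space (see pixfmt_to_colorspace() for defaults).
	} else {
		AVFrame out;
		out.data[0] = bitmapBits;		// destination pixel buffer
		out.linesize[0] = bytesPerRow;	// destination row stride in bytes
		convert(decodedFrame, &out, width, height);
	}
*/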