#include <strings.h>
#include <stdio.h>

#include "gfx_util.h"
#include "gfx_conv_c.h"
#include "gfx_conv_mmx.h"
#include "CpuCapabilities.h"

/*
 * Reference docs:
 * http://www.joemaller.com/fcp/fxscript_yuv_color.shtml
 */

#if 1
#define TRACE(a...) printf(a)
#else
#define TRACE(a...)
#endif


// Tries to find the best conversion function for the given pair of
// colorspaces: the ffmpeg codec side (pixelFormat) and the Media Kit
// side (colorSpace). Returns NULL if no suitable converter exists.
gfx_convert_func
resolve_colorspace(color_space colorSpace, PixelFormat pixelFormat)
{
	CPUCapabilities cpu;

	switch (colorSpace) {
		case B_RGB32:
			if (pixelFormat == PIX_FMT_YUV410P) {
				// MMX version currently disabled:
				// if (cpu.HasMMX()) {
				//	TRACE("resolve_colorspace: gfx_conv_yuv410p_rgb32_mmx\n");
				//	return gfx_conv_yuv410p_rgb32_mmx;
				// }
				TRACE("resolve_colorspace: gfx_conv_yuv410p_rgb32_c\n");
				return gfx_conv_yuv410p_rgb32_c;
			}

			if (pixelFormat == PIX_FMT_YUV411P) {
				// MMX version currently disabled:
				// if (cpu.HasMMX()) {
				//	TRACE("resolve_colorspace: gfx_conv_yuv411p_rgb32_mmx\n");
				//	return gfx_conv_yuv411p_rgb32_mmx;
				// }
				TRACE("resolve_colorspace: gfx_conv_yuv411p_rgb32_c\n");
				return gfx_conv_yuv411p_rgb32_c;
			}

			if (pixelFormat == PIX_FMT_YUV420P || pixelFormat == PIX_FMT_YUVJ420P) {
				if (cpu.HasSSE2()) {
					TRACE("resolve_colorspace: gfx_conv_yuv420p_rgba32_sse2\n");
					return gfx_conv_yuv420p_rgba32_sse2;
				} else {
					TRACE("resolve_colorspace: gfx_conv_YCbCr420p_RGB32_c\n");
					return gfx_conv_YCbCr420p_RGB32_c;
				}
			}

			if (pixelFormat == PIX_FMT_YUV422P || pixelFormat == PIX_FMT_YUVJ422P) {
				if (cpu.HasSSE2()) {
					TRACE("resolve_colorspace: gfx_conv_yuv422p_rgba32_sse2\n");
					return gfx_conv_yuv422p_rgba32_sse2;
				} else {
					TRACE("resolve_colorspace: gfx_conv_YCbCr422_RGB32_c\n");
					return gfx_conv_YCbCr422_RGB32_c;
				}
			}

			TRACE("resolve_colorspace: %s => B_RGB32: NULL\n",
				pixfmt_to_string(pixelFormat));
			return NULL;

		case B_RGB24_BIG:
			TRACE("resolve_colorspace: %s => B_RGB24_BIG: NULL\n",
				pixfmt_to_string(pixelFormat));
			return NULL;

		case B_RGB24:
			TRACE("resolve_colorspace: %s => B_RGB24: NULL\n",
				pixfmt_to_string(pixelFormat));
			return NULL;

		case B_YCbCr422:
			if (pixelFormat == PIX_FMT_YUV410P) {
				// MMX version currently disabled:
				// if (cpu.HasMMX()) {
				//	TRACE("resolve_colorspace: gfx_conv_yuv410p_ycbcr422_mmx\n");
				//	return gfx_conv_yuv410p_ycbcr422_mmx;
				// }
				TRACE("resolve_colorspace: gfx_conv_yuv410p_ycbcr422_c\n");
				return gfx_conv_yuv410p_ycbcr422_c;
			}

			if (pixelFormat == PIX_FMT_YUV411P) {
				// MMX version currently disabled:
				// if (cpu.HasMMX()) {
				//	TRACE("resolve_colorspace: gfx_conv_yuv411p_ycbcr422_mmx\n");
				//	return gfx_conv_yuv411p_ycbcr422_mmx;
				// }
				TRACE("resolve_colorspace: gfx_conv_yuv411p_ycbcr422_c\n");
				return gfx_conv_yuv411p_ycbcr422_c;
			}

			if (pixelFormat == PIX_FMT_YUV420P || pixelFormat == PIX_FMT_YUVJ420P) {
				// MMX version currently disabled:
				// if (cpu.HasMMX()) {
				//	TRACE("resolve_colorspace: gfx_conv_yuv420p_ycbcr422_mmx\n");
				//	return gfx_conv_yuv420p_ycbcr422_mmx;
				// }
				TRACE("resolve_colorspace: gfx_conv_yuv420p_ycbcr422_c\n");
				return gfx_conv_yuv420p_ycbcr422_c;
			}

			if (pixelFormat == PIX_FMT_YUYV422) {
				// MMX version currently disabled:
				// if (cpu.HasMMX()) {
				//	TRACE("resolve_colorspace: PIX_FMT_YUYV422 => B_YCbCr422: gfx_conv_null_mmx\n");
				//	return gfx_conv_null_mmx;
				// }
				TRACE("resolve_colorspace: PIX_FMT_YUYV422 => B_YCbCr422: gfx_conv_null_c\n");
				return gfx_conv_null_c;
			}

			// No dedicated converter; fall back to gfx_conv_null_c.
			TRACE("resolve_colorspace: %s => B_YCbCr422: gfx_conv_null_c\n",
				pixfmt_to_string(pixelFormat));
			return gfx_conv_null_c;

		default:
			TRACE("resolve_colorspace: default: NULL !!!\n");
			return NULL;
	}
}
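
/*
 * Usage sketch (illustrative only): how a caller might pick and invoke a
 * converter. The gfx_convert_func signature
 * (AVFrame* in, AVFrame* out, int width, int height) is an assumption
 * based on gfx_util.h; adjust if the actual typedef differs.
 *
 *	gfx_convert_func convert
 *		= resolve_colorspace(B_RGB32, PIX_FMT_YUV420P);
 *	if (convert != NULL)
 *		(*convert)(inFrame, outFrame, width, height);
 */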

// Returns a human-readable name for the given ffmpeg pixel format,
// mainly for the TRACE output above.
const char*
pixfmt_to_string(int p)
{
	switch (p) {
		case PIX_FMT_NONE: return "PIX_FMT_NONE";
		case PIX_FMT_YUV420P: return "PIX_FMT_YUV420P"; ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
		case PIX_FMT_YUYV422: return "PIX_FMT_YUYV422"; ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
		case PIX_FMT_RGB24: return "PIX_FMT_RGB24"; ///< packed RGB 8:8:8, 24bpp, RGBRGB...
		case PIX_FMT_BGR24: return "PIX_FMT_BGR24"; ///< packed RGB 8:8:8, 24bpp, BGRBGR...
		case PIX_FMT_YUV422P: return "PIX_FMT_YUV422P"; ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
		case PIX_FMT_YUV444P: return "PIX_FMT_YUV444P"; ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
		case PIX_FMT_RGB32: return "PIX_FMT_RGB32"; ///< packed RGB 8:8:8, 32bpp, (msb)8A 8R 8G 8B(lsb), in CPU endianness
		case PIX_FMT_YUV410P: return "PIX_FMT_YUV410P"; ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
		case PIX_FMT_YUV411P: return "PIX_FMT_YUV411P"; ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
		case PIX_FMT_RGB565: return "PIX_FMT_RGB565"; ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), in CPU endianness
		case PIX_FMT_RGB555: return "PIX_FMT_RGB555"; ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), in CPU endianness, most significant bit to 0
		case PIX_FMT_GRAY8: return "PIX_FMT_GRAY8"; ///< Y, 8bpp
		case PIX_FMT_MONOWHITE: return "PIX_FMT_MONOWHITE"; ///< Y, 1bpp, 0 is white, 1 is black
		case PIX_FMT_MONOBLACK: return "PIX_FMT_MONOBLACK"; ///< Y, 1bpp, 0 is black, 1 is white
		case PIX_FMT_PAL8: return "PIX_FMT_PAL8"; ///< 8 bit with PIX_FMT_RGB32 palette
		case PIX_FMT_YUVJ420P: return "PIX_FMT_YUVJ420P - YUV420P (Jpeg)"; ///< planar YUV 4:2:0, 12bpp, full scale (JPEG)
		case PIX_FMT_YUVJ422P: return "PIX_FMT_YUVJ422P - YUV422P (Jpeg)"; ///< planar YUV 4:2:2, 16bpp, full scale (JPEG)
		case PIX_FMT_YUVJ444P: return "PIX_FMT_YUVJ444P"; ///< planar YUV 4:4:4, 24bpp, full scale (JPEG)
		case PIX_FMT_XVMC_MPEG2_MC: return "PIX_FMT_XVMC_MPEG2_MC"; ///< XVideo Motion Acceleration via common packet passing
		case PIX_FMT_XVMC_MPEG2_IDCT: return "PIX_FMT_XVMC_MPEG2_IDCT";
		case PIX_FMT_UYVY422: return "PIX_FMT_UYVY422"; ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
		case PIX_FMT_UYYVYY411: return "PIX_FMT_UYYVYY411"; ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
		case PIX_FMT_BGR32: return "PIX_FMT_BGR32"; ///< packed RGB 8:8:8, 32bpp, (msb)8A 8B 8G 8R(lsb), in CPU endianness
		case PIX_FMT_BGR565: return "PIX_FMT_BGR565"; ///< packed RGB 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), in CPU endianness
		case PIX_FMT_BGR555: return "PIX_FMT_BGR555"; ///< packed RGB 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), in CPU endianness, most significant bit to 1
		case PIX_FMT_BGR8: return "PIX_FMT_BGR8"; ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
		case PIX_FMT_BGR4: return "PIX_FMT_BGR4"; ///< packed RGB 1:2:1, 4bpp, (msb)1B 2G 1R(lsb)
		case PIX_FMT_BGR4_BYTE: return "PIX_FMT_BGR4_BYTE"; ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
		case PIX_FMT_RGB8: return "PIX_FMT_RGB8"; ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
		case PIX_FMT_RGB4: return "PIX_FMT_RGB4"; ///< packed RGB 1:2:1, 4bpp, (msb)1R 2G 1B(lsb)
		case PIX_FMT_RGB4_BYTE: return "PIX_FMT_RGB4_BYTE"; ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
		case PIX_FMT_NV12: return "PIX_FMT_NV12"; ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 for UV
		case PIX_FMT_NV21: return "PIX_FMT_NV21"; ///< as above, but U and V bytes are swapped
		case PIX_FMT_RGB32_1: return "PIX_FMT_RGB32_1"; ///< packed RGB 8:8:8, 32bpp, (msb)8R 8G 8B 8A(lsb), in CPU endianness
		case PIX_FMT_BGR32_1: return "PIX_FMT_BGR32_1"; ///< packed RGB 8:8:8, 32bpp, (msb)8B 8G 8R 8A(lsb), in CPU endianness
		case PIX_FMT_GRAY16BE: return "PIX_FMT_GRAY16BE"; ///< Y, 16bpp, big-endian
		case PIX_FMT_GRAY16LE: return "PIX_FMT_GRAY16LE"; ///< Y, 16bpp, little-endian
		case PIX_FMT_YUV440P: return "PIX_FMT_YUV440P"; ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
		case PIX_FMT_YUVJ440P: return "PIX_FMT_YUVJ440P - YUV440P (Jpeg)"; ///< planar YUV 4:4:0 full scale (JPEG)
		case PIX_FMT_YUVA420P: return "PIX_FMT_YUVA420P - YUV420P (Alpha)"; ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
		case PIX_FMT_VDPAU_H264: return "PIX_FMT_VDPAU_H264"; ///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
		case PIX_FMT_VDPAU_MPEG1: return "PIX_FMT_VDPAU_MPEG1"; ///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
		case PIX_FMT_VDPAU_MPEG2: return "PIX_FMT_VDPAU_MPEG2"; ///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
		case PIX_FMT_VDPAU_WMV3: return "PIX_FMT_VDPAU_WMV3"; ///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
		case PIX_FMT_VDPAU_VC1: return "PIX_FMT_VDPAU_VC1"; ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
		case PIX_FMT_RGB48BE: return "PIX_FMT_RGB48BE"; ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, big-endian
		case PIX_FMT_RGB48LE: return "PIX_FMT_RGB48LE"; ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, little-endian
		case PIX_FMT_VAAPI_MOCO: return "PIX_FMT_VAAPI_MOCO"; ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[0] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
		case PIX_FMT_VAAPI_IDCT: return "PIX_FMT_VAAPI_IDCT"; ///< HW acceleration through VA API at IDCT entry-point, Picture.data[0] contains a vaapi_render_state struct which contains fields extracted from headers
		case PIX_FMT_VAAPI_VLD: return "PIX_FMT_VAAPI_VLD"; ///< HW decoding through VA API, Picture.data[0] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
		default:
			return "(unknown)";
	}
}
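
/*
 * Example (illustrative): pixfmt_to_string(PIX_FMT_YUV420P) returns
 * "PIX_FMT_YUV420P"; any value not listed above yields "(unknown)".
 */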

// Maps an ffmpeg pixel format to the closest matching Media Kit color
// space; formats without a sensible match map to B_NO_COLOR_SPACE.
color_space
pixfmt_to_colorspace(int p)
{
	switch (p) {
		default:
		case PIX_FMT_NONE:
			return B_NO_COLOR_SPACE;

		case PIX_FMT_YUV420P: return B_YUV420; ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
		case PIX_FMT_YUYV422: return B_YUV422; ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
		case PIX_FMT_RGB24: return B_RGB24_BIG; ///< packed RGB 8:8:8, 24bpp, RGBRGB...
		case PIX_FMT_BGR24: return B_RGB24; ///< packed RGB 8:8:8, 24bpp, BGRBGR...
		case PIX_FMT_YUV422P: return B_YUV422; ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
		case PIX_FMT_YUV444P: return B_YUV444; ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
		case PIX_FMT_RGB32: return B_RGBA32_BIG; ///< packed RGB 8:8:8, 32bpp, (msb)8A 8R 8G 8B(lsb), in CPU endianness
		case PIX_FMT_YUV410P: return B_YUV9; ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
		case PIX_FMT_YUV411P: return B_YUV12; ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
		case PIX_FMT_RGB565: return B_RGB16_BIG; ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), in CPU endianness
		case PIX_FMT_RGB555: return B_RGB15_BIG; ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), in CPU endianness, most significant bit to 0
		case PIX_FMT_GRAY8: return B_GRAY8; ///< Y, 8bpp
		// case PIX_FMT_MONOWHITE: return B_GRAY1; ///< Y, 1bpp, 0 is white, 1 is black
		case PIX_FMT_MONOBLACK: return B_GRAY1; ///< Y, 1bpp, 0 is black, 1 is white
		case PIX_FMT_PAL8: return B_CMAP8; ///< 8 bit with PIX_FMT_RGB32 palette
		case PIX_FMT_BGR32: return B_RGB32; ///< packed RGB 8:8:8, 32bpp, (msb)8A 8B 8G 8R(lsb), in CPU endianness
		case PIX_FMT_BGR565: return B_RGB16; ///< packed RGB 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), in CPU endianness
		case PIX_FMT_BGR555: return B_RGB15; ///< packed RGB 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), in CPU endianness, most significant bit to 1

		// Formats with no direct Media Kit equivalent are handled by the
		// default case above: the PIX_FMT_YUVJ* (JPEG range) variants,
		// PIX_FMT_XVMC_*, PIX_FMT_UYVY422, PIX_FMT_UYYVYY411, the 4 and
		// 8 bit packed RGB/BGR formats, PIX_FMT_NV12/NV21,
		// PIX_FMT_RGB32_1/BGR32_1, PIX_FMT_GRAY16BE/LE,
		// PIX_FMT_YUV440P/YUVJ440P, PIX_FMT_YUVA420P, PIX_FMT_RGB48BE/LE,
		// and the hardware accelerated PIX_FMT_VDPAU_* and PIX_FMT_VAAPI_*
		// formats.
	}
}


#define BEGIN_TAG "\033[31m"
#define END_TAG "\033[0m"

// Dumps the most interesting fields of the given AVFrame to stdout,
// wrapped in ANSI escape sequences so the line shows up in red.
void
dump_ffframe(AVFrame *frame, const char *name)
{
	const char *picttypes[] = {
		"no pict type", "intra", "predicted", "bidir pre", "s(gmc)-vop"
	};

	printf(BEGIN_TAG "AVFrame(%s) pts:%-10lld cnum:%-5d dnum:%-5d %s%s\n" END_TAG,
		name,
		(long long)frame->pts,
		frame->coded_picture_number,
		frame->display_picture_number,
//		frame->quality,
		frame->key_frame ? "keyframe, " : "",
		picttypes[frame->pict_type]);
//	printf(BEGIN_TAG "\t\tlinesize[] = {%ld, %ld, %ld, %ld}\n" END_TAG,
//		frame->linesize[0], frame->linesize[1], frame->linesize[2],
//		frame->linesize[3]);
}
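
/*
 * Usage sketch (illustrative only; the surrounding allocation/decode calls
 * are assumptions based on the FFmpeg API of this era, not part of this
 * file):
 *
 *	AVFrame* frame = avcodec_alloc_frame();
 *	// ... decode a video packet into frame ...
 *	dump_ffframe(frame, "decoded");
 *	av_free(frame);
 */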