52      const uint8_t *val_table, int nb_codes,
53      int use_static, int is_ac)
56      uint16_t huff_code[256];
57      uint16_t huff_sym[256];
64      for (i = 0; i < 256; i++)
65          huff_sym[i] = i + 16 * is_ac;
68          huff_sym[0] = 16 * 256;
71          huff_code, 2, 2, huff_sym, 2, 2, use_static);
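/* From the fragments above: build_vlc() converts the raw DHT tables into a VLC
 * via ff_init_vlc_sparse().  huff_code/huff_size come from the canonical JPEG
 * Huffman construction (codes of each length assigned in increasing order,
 * e.g. two codes of length 2 and three of length 3 become 00, 01, 100, 101,
 * 110), and huff_sym offsets AC symbols by 16*is_ac so DC and AC tables can
 * share one symbol numbering. */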
93 if (len > 14 && buf[12] == 1)
95 if (len > 14 && buf[12] == 2)
141 "error using external huffman table, switching back to internal\n");
189 for (i = 0; i < 64; i++) {
199 len -= 1 + 64 * (1+pr);
229     for (i = 1; i <= 16; i++) {
234     if (len < n || n > 256)
238     for (i = 0; i < n; i++) {
249            class, index, code_max + 1);
250     if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
251                          code_max + 1, 0, class > 0)) < 0)
256     if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
257                          code_max + 1, 0, 0)) < 0)
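/* DHT parsing: each table carries 16 length counts followed by up to 256
 * symbol values; class 0 selects a DC table, class 1 an AC table.  The second
 * build_vlc() call into s->vlcs[2][index] appears to keep an extra copy of the
 * AC table without the AC symbol offset, for use by the progressive decoder. */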
279 if (bits > 16 || bits < 1) {
309     if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
313 if (nb_components <= 0 ||
319 "nb_components changing in interlaced picture\n");
323     if (s->ls && !(bits <= 8 || nb_components == 1)) {
325            "JPEG-LS that is not <= 8 "
326            "bits/component or 16-bit gray");
332 for (i = 0; i < nb_components; i++) {
338     if (h_count[i] > s->h_max)
339         s->h_max = h_count[i];
340     if (v_count[i] > s->v_max)
341         s->v_max = v_count[i];
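/* The SOF component loop stores each component's horizontal/vertical sampling
 * factor and keeps the running maxima in s->h_max / s->v_max, which later
 * drive the macroblock (MCU) geometry and the chroma upscaling decisions. */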
347 if (!h_count[i] || !v_count[i]) {
349 "Invalid sampling factor in component %d %d:%d\n",
350 i, h_count[i], v_count[i]);
355 i, h_count[i], v_count[i],
358 if ( nb_components == 4
373     memcmp(s->h_count, h_count, sizeof(h_count)) ||
374     memcmp(s->v_count, v_count, sizeof(v_count))) {
379     memcpy(s->h_count, h_count, sizeof(h_count));
380     memcpy(s->v_count, v_count, sizeof(v_count));
409     if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
414     pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
421 if (!(pix_fmt_id & 0xD0D0D0D0))
422 pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
423 if (!(pix_fmt_id & 0x0D0D0D0D))
424 pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
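/* pix_fmt_id packs the sampling factors of up to four components into nibbles,
 * first component in the top bits: (h0<<28)|(v0<<24)|(h1<<20)|(v1<<16)|...
 * Plain YUV 4:2:0 (h = {2,1,1}, v = {2,1,1}) therefore becomes 0x22111100,
 * one of the values the switch below matches.  The two subtractions above
 * appear to halve all horizontal (resp. vertical) factors when every one of
 * them is 0 or 2, so equivalent sampling ratios collapse to a single id. */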
426 for (i = 0; i < 8; i++) {
427 int j = 6 + (i&1) - (i&6);
428 int is = (pix_fmt_id >> (4*i)) & 0xF;
429 int js = (pix_fmt_id >> (4*j)) & 0xF;
431 if (is == 1 && js != 2 && (i < 2 || i > 5))
432 js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
433 if (is == 1 && js != 2 && (i < 2 || i > 5))
434 js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
436 if (is == 1 && js == 2) {
442 switch (pix_fmt_id) {
530 if (pix_fmt_id == 0x14111100)
570 if (pix_fmt_id == 0x42111100) {
574     } else if (pix_fmt_id == 0x24111100) {
611     else if (s->bits <= 8)
630 for (i = 0; i < 4; i++)
637 if (len != (8 + (3 * nb_components)))
651     int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
652     int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
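/* Rounded-up division: bw/bh count 8-sample blocks of the most densely sampled
 * component, e.g. width = 1920 with h_max = 2 gives bw = (1920 + 15) / 16 = 120. */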
672 if (code < 0 || code > 16) {
674 "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
675            0, dc_index, &s->vlcs[0][dc_index]);
687     int dc_index, int ac_index, int16_t *quant_matrix)
693 if (val == 0xfffff) {
697     val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
698     val = av_clip_int16(val);
708     i += ((unsigned)code) >> 4;
716     int sign = (~cache) >> 31;
717     level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
727 block[j] = level * quant_matrix[j];
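/* Baseline block decoding: the DC value is coded as a difference against
 * s->last_dc[component]; each AC VLC symbol packs (run << 4) | size, so e.g.
 * code 0x23 means "skip 2 zero coefficients, then read a 3-bit magnitude",
 * and the dequantized level is stored at the zig-zag position j. */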
736     int component, int dc_index,
737     int16_t *quant_matrix, int Al)
742     if (val == 0xfffff) {
746     val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
754     uint8_t *last_nnz, int ac_index,
755     int16_t *quant_matrix,
756     int ss, int se, int Al, int *EOBRUN)
768     for (i = ss; ; i++) {
772     run = ((unsigned) code) >> 4;
781     int sign = (~cache) >> 31;
782     level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
790     block[j] = level * (quant_matrix[j] << Al);
797     block[j] = level * (quant_matrix[j] << Al);
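/* Progressive first AC scan: only coefficients ss..se of each block are coded,
 * scaled up by << Al (successive approximation); an end-of-band run in *EOBRUN
 * can mark several consecutive blocks as already finished. */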
826 #define REFINE_BIT(j) { \
827 UPDATE_CACHE(re, &s->gb); \
828 sign = block[j] >> 15; \
829 block[j] += SHOW_UBITS(re, &s->gb, 1) * \
830 ((quant_matrix[j] ^ sign) - sign) << Al; \
831 LAST_SKIP_BITS(re, &s->gb, 1); \
839 av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
844 j = s->scantable.permutated[i]; \
847 else if (run-- == 0) \
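/* REFINE_BIT adds one bit of precision to a coefficient that is already
 * non-zero: one bit from the stream decides whether (quant_matrix[j] << Al) is
 * added in the direction of the coefficient's existing sign. */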
854 int ac_index, int16_t *quant_matrix,
855     int ss, int se, int Al, int *EOBRUN)
857     int code, i = ss, j, sign, val, run;
858     int last = FFMIN(se, *last_nnz);
869     run = ((unsigned) code) >> 4;
876     block[j] = ((quant_matrix[j] << Al) ^ val) - val;
884 run = ((unsigned) code) >> 4;
905 for (; i <= last; i++) {
926 for (i = 0; i < nb_components; i++)
940 for (i = 0; i < nb_components; i++)
955 int left[4], top[4], topleft[4];
956     const int linesize = s->linesize[0];
957     const int mask = ((1 << s->bits) - 1) << point_transform;
973     for (i = 0; i < 4; i++)
976     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
980     ptr += linesize >> 1;
982     for (i = 0; i < 4; i++)
983     top[i] = left[i] = topleft[i] = buffer[0][i];
985     for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
998     top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
1000 if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1001 modified_predictor = 1;
1003 for (i=0;i<nb_components;i++) {
1006 topleft[i] = top[i];
1007    top[i] = buffer[mb_x][i];
1009    PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
1015    left[i] = buffer[mb_x][i] =
1016        mask & (pred + (unsigned)(dc * (1 << point_transform)));
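/* PREDICT picks one of the JPEG lossless predictors from the left, top and
 * top-left neighbours (1 = left, 2 = top, 3 = top-left, 4 = left+top-topleft,
 * 5..7 = averaged variants); the reconstructed sample is then
 * mask & (pred + (dc << point_transform)). */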
1025    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1026    ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1027    ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
1028    ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
1029    ptr[4*mb_x + 0] = buffer[mb_x][3];
1032 for(i=0; i<nb_components; i++) {
1035    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1036    ptr[4*mb_x+3-c] = buffer[mb_x][i];
1038    } else if(s->bits == 9) {
1041    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1042    ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
1046    } else if (s->rct) {
1047    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1048    ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
1049    ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1050    ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
1053    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1055    ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
1056    ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
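/* The loops above undo a reversible colour transform (RCT): the first decoded
 * component is restored as C0' = C0 - ((C1 + C2 - 0x200) >> 2) (the variant
 * starting at line 1053 seems to omit the 0x200 bias; its first line is not
 * shown here), after which the other two channels come back as C1 + C0' and
 * C2 + C0'. */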
1059 for(i=0; i<nb_components; i++) {
1062    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1063    ptr[3*mb_x+2-c] = buffer[mb_x][i];
1065    } else if(s->bits == 9) {
1068    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
1069    ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
1079    int point_transform, int nb_components)
1081    int i, mb_x, mb_y, mask;
1083    int resync_mb_y = 0;
1084    int resync_mb_x = 0;
1086    point_transform += bits - s->bits;
1087    mask = ((1 << s->bits) - 1) << point_transform;
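/* point_transform is increased by (bits - s->bits), where bits is presumably
 * s->bits rounded up to 8 or 16 (that declaration is not shown), so decoded
 * samples land at the top of the byte- or word-sized storage; mask keeps the
 * reconstruction inside the (1 << s->bits) - 1 range after that shift. */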
1089 av_assert0(nb_components>=1 && nb_components<=4);
1091    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1092    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1103    if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
1104 int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1105 int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1106 for (i = 0; i < nb_components; i++) {
1109    int n, h, v, x, y, c, j, linesize;
1118    if(bits>8) linesize /= 2;
1120    for(j=0; j<n; j++) {
1126    if ( h * mb_x + x >= s->width
1127      || v * mb_y + y >= s->height) {
1129    } else if (bits<=8) {
1132 if(x==0 && leftcol){
1133 pred= 1 << (bits - 1);
1138 if(x==0 && leftcol){
1139 pred= ptr[-linesize];
1141 PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1146 ptr += linesize >> 1;
1148 *ptr= pred + ((unsigned)dc << point_transform);
1150    ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
1152 if(x==0 && leftcol){
1153 pred= 1 << (bits - 1);
1158 if(x==0 && leftcol){
1159 pred= ptr16[-linesize];
1161 PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1166 ptr16 += linesize >> 1;
1168 *ptr16= pred + ((unsigned)dc << point_transform);
1177 for (i = 0; i < nb_components; i++) {
1180    int n, h, v, x, y, c, j, linesize, dc;
1189    if(bits>8) linesize /= 2;
1191    for (j = 0; j < n; j++) {
1197    if ( h * mb_x + x >= s->width
1198      || v * mb_y + y >= s->height) {
1200    } else if (bits<=8) {
1202    (linesize * (v * mb_y + y)) +
1204 PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1207 *ptr = pred + ((unsigned)dc << point_transform);
1209    ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x));
1210 PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1213 *ptr16= pred + ((unsigned)dc << point_transform);
1234    int linesize, int lowres)
1239    case 1: copy_block4(dst, src, linesize, linesize, 4);
1241    case 2: copy_block2(dst, src, linesize, linesize, 2);
1243    case 3: *dst = *src;
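/* mjpeg_copy_block() selects a copy routine matching the lowres factor:
 * a full 8x8 copy at lowres 0 (handled by a put_pixels call not shown here),
 * 4x4 at 1, 2x2 at 2 and a single pixel at 3. */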
1250 int block_x, block_y;
1253    for (block_y=0; block_y<size; block_y++)
1254    for (block_x=0; block_x<size; block_x++)
1255    *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1257    for (block_y=0; block_y<size; block_y++)
1258    for (block_x=0; block_x<size; block_x++)
1259    *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
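/* shift_output() left-shifts every sample of the block so that streams coded
 * with fewer than 8 (or 16) bits per component still cover the full range of
 * the 8- or 16-bit output plane. */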
1264    int Al, const uint8_t *mb_bitmask,
1265    int mb_bitmask_size,
1268    int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1273    int bytes_per_pixel = 1 + (s->bits > 8);
1290    for (i = 0; i < nb_components; i++) {
1293    reference_data[c] = reference ? reference->data[c] : NULL;
1298    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1299    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1310    for (i = 0; i < nb_components; i++) {
1312    int n, h, v, x, y, c, j;
1320    for (j = 0; j < n; j++) {
1321    block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1322    (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1325    block_offset += linesize[c] >> 1;
1326    if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1327      && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1328    ptr = data[c] + block_offset;
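/* block_offset addresses the top-left pixel of the current 8x8 block: 8 lines
 * down per vertical block index and 8 samples (times bytes_per_pixel) right
 * per horizontal index, the whole thing shifted down by the lowres factor; the
 * bounds check against chroma_width/chroma_height skips blocks that would fall
 * outside a subsampled plane. */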
1343 "error y=%d x=%d\n", mb_y, mb_x);
1363 "error y=%d x=%d\n", mb_y, mb_x);
1367    ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1370 (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1385    int se, int Ah, int Al)
1393 if (se < ss || se > 63) {
1404    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1412    for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1419 quant_matrix, ss, se, Al, &EOBRUN);
1422 quant_matrix, ss, se, Al, &EOBRUN);
1425 "error y=%d x=%d\n", mb_y, mb_x);
1440    const int bytes_per_pixel = 1 + (s->bits > 8);
1441    const int block_size = s->lossless ? 1 : 8;
1448    int mb_width  = (s->width + h * block_size - 1) / (h * block_size);
1449    int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1455    data += linesize >> 1;
1457    for (mb_y = 0; mb_y < mb_height; mb_y++) {
1461    for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
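/* Once every progressive scan has been read, the coefficients accumulated in
 * s->blocks are pushed through the IDCT into the picture; mb_width/mb_height
 * are recomputed per component here, with block_size 8 for the DCT modes
 * (the lossless value 1 should not normally reach this path). */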
1472    int mb_bitmask_size, const AVFrame *reference)
1476    const int block_size = s->lossless ? 1 : 8;
1477    int ilv, prev_shift;
1481 "Can not process SOS before SOF, skipping\n");
1500 "decode_sos: nb_components (%d) unsupported\n", nb_components);
1503 if (len != 6 + 2 * nb_components) {
1507 for (i = 0; i < nb_components; i++) {
1516 "decode_sos: index(%d) out of components\n", index);
1532 index = (index+2)%3;
1552 prev_shift = point_transform = 0;
1554 if (nb_components > 1) {
1558    } else if (!s->ls) {
1561    s->mb_width  = (s->width + h * block_size - 1) / (h * block_size);
1570    s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1580 for (i = 0; i < nb_components; i++)
1590 point_transform, ilv)) < 0)
1599 nb_components)) < 0)
1608 point_transform)) < 0)
1612 prev_shift, point_transform,
1613 mb_bitmask, mb_bitmask_size, reference)) < 0)
1697 int t_w, t_h, v1, v2;
1709 "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1718 if (len -10 - (t_w * t_h * 3) > 0)
1719 len -= t_w * t_h * 3;
1741 "Pegasus lossless jpeg header found\n");
1771    if (id == AV_RL32("colr") && len > 0) {
1778    if (id == AV_RL32("xfrm") && len > 0) {
1806    } else if (type == 1) {
1818 if (!(flags & 0x04)) {
1828    int ret, le, ifd_offset, bytes_read;
1885 "mjpeg: error, decode_app parser read over the end\n");
1899 for (i = 0; i < len - 2; i++)
1901    if (i > 0 && cbuf[i - 1] == '\n')
1910    if (!strncmp(cbuf, "AVID", 4)) {
1912    } else if (!strcmp(cbuf, "CS=ITU601"))
1914    else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
1915    (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
1917    else if (!strcmp(cbuf, "MULTISCOPE II"))
1936 buf_ptr = *pbuf_ptr;
1937 while (buf_end - buf_ptr > 1) {
1940 if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) {
1949    ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
1950 *pbuf_ptr = buf_ptr;
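/* find_marker() scans forward for a 0xFF byte followed by a marker value in
 * the 0xC0..0xFE range, logs how many bytes had to be skipped, and updates
 * *pbuf_ptr to the position it stopped at. */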
1956 const uint8_t **unescaped_buf_ptr,
1957 int *unescaped_buf_size)
1967    if (start_code == SOS && !s->ls) {
1971 while (src < buf_end) {
1977 while (src < buf_end && x == 0xff)
1980 if (x >= 0xd0 && x <= 0xd7)
1987    *unescaped_buf_ptr  = s->buffer;
1988    *unescaped_buf_size = dst - s->buffer;
1989    memset(s->buffer + *unescaped_buf_size, 0,
1993    (buf_end - *buf_ptr) - (dst - s->buffer));
1994    } else if (start_code == SOS && s->ls) {
2002 while (src + t < buf_end) {
2005 while ((src + t < buf_end) && x == 0xff)
2020    if (x == 0xFF && b < t) {
2032 *unescaped_buf_ptr = dst;
2033 *unescaped_buf_size = (bit_count + 7) >> 3;
2034    memset(s->buffer + *unescaped_buf_size, 0,
2037 *unescaped_buf_ptr = *buf_ptr;
2038 *unescaped_buf_size = buf_end - *buf_ptr;
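/* ff_mjpeg_find_marker() also un-escapes the entropy-coded data: in a normal
 * SOS segment every 0xFF inside the payload is followed by a stuffed 0x00 (or
 * by a restart marker 0xD0..0xD7), so the copy loop into s->buffer drops the
 * stuffing bytes and stops at any other marker; JPEG-LS streams use
 * bit-stuffing instead, which the second branch undoes bit by bit.  Anything
 * else falls through to the untouched *buf_ptr case at the end. */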
2049    int buf_size = avpkt->size;
2051 const uint8_t *buf_end, *buf_ptr;
2052 const uint8_t *unescaped_buf_ptr;
2054 int unescaped_buf_size;
2067 buf_end = buf + buf_size;
2068 while (buf_ptr < buf_end) {
2072 &unescaped_buf_size);
2074 if (start_code < 0) {
2076    } else if (unescaped_buf_size > INT_MAX / 8) {
2078 "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2079 start_code, unescaped_buf_size, buf_size);
2083 start_code, buf_end - buf_ptr);
2097 if (start_code >= 0xd0 && start_code <= 0xd7)
2099 "restart marker: %d\n", start_code & 0x0f);
2101    else if (start_code >= APP0 && start_code <= APP15)
2104    else if (start_code == COM)
2110    (start_code == SOF48 || start_code == LSE)) {
2115 switch (start_code) {
2173 "Found EOI before any SOF, ignoring\n");
2191    int qpw = (s->width + 15) / 16;
2194    memset(qp_table_buf->data, qp, qpw);
2223 "mjpeg: unsupported coding type (%x)\n", start_code);
2230 "marker parser used %d bytes (%d bits)\n",
2262 for (p = 0; p<4; p++) {
2275    for (i = 0; i < h; i++) {
2277    if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2278    else line[w - 1] = line[(w - 1) / 2];
2279    for (index = w - 2; index > 0; index--) {
2281    ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2283    line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2287 ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2289 ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2291 line[w - 1] = line[(w - 1) / 3];
2293 line[w - 2] = line[w - 1];
2295 for (index = w - 3; index > 0; index--) {
2296    line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
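/* This in-place loop stretches a horizontally subsampled line to full width,
 * walking from the right edge inwards: each destination index is rebuilt from
 * index/2 (or index/3 for 1:3 subsampling) with simple neighbour averaging,
 * so planes flagged in s->upscale_h reach the full picture width. */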
2320 for (p = 0; p < 4; p++) {
2331 for (i = h - 1; i; i--) {
2334 if (src1 == src2 || i == h - 1) {
2335 memcpy(dst, src1, w);
2337 for (index = 0; index < w; index++)
2338 dst[index] = (src1[index] + src2[index]) >> 1;
2347 for (index=0; index<4; index++) {
2351 if(index && index<3){
2357 for (i=0; i<h/2; i++) {
2359    FFSWAP(int, dst[j], dst2[j]);
2369    for (i=0; i<h; i++) {
2372 for (index=0; index<4; index++) {
2376 for (j=0; j<w; j++) {
2378 int r = dst[0][j] * k;
2379 int g = dst[1][j] * k;
2380 int b = dst[2][j] * k;
2381 dst[0][j] = g*257 >> 16;
2382 dst[1][j] = b*257 >> 16;
2383 dst[2][j] = r*257 >> 16;
2391    for (i=0; i<h; i++) {
2394 for (index=0; index<4; index++) {
2398 for (j=0; j<w; j++) {
2400 int r = (255 - dst[0][j]) * k;
2401 int g = (128 - dst[1][j]) * k;
2402 int b = (128 - dst[2][j]) * k;
2403 dst[0][j] = r*257 >> 16;
2404 dst[1][j] = (g*257 >> 16) + 128;
2405 dst[2][j] = (b*257 >> 16) + 128;
2426    return buf_ptr - buf;
2449 for (i = 0; i < 3; i++) {
2450 for (j = 0; j < 4; j++)
2467 #if CONFIG_MJPEG_DECODER
2468 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2469 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2471 {
"extern_huff",
"Use external huffman table.",
2476 static const AVClass mjpegdec_class = {
2495 .priv_class = &mjpegdec_class,
2499 #if CONFIG_THP_DECODER
int block_stride[MAX_COMPONENTS]
const struct AVCodec * codec
const AVPixFmtDescriptor * pix_desc
stereoscopic information (cached, since it is read before frame allocation)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Views are packed per line, as if interlaced.
int v_count[MAX_COMPONENTS]
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
This structure describes decoded (raw) audio or video data.
ptrdiff_t const GLvoid * data
static void flush(AVCodecContext *avctx)
void(* clear_block)(int16_t *block)
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define LIBAVUTIL_VERSION_INT
packed RGB 8:8:8, 24bpp, RGBRGB...
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static void skip_bits_long(GetBitContext *s, int n)
static av_cold int init(AVCodecContext *avctx)
#define AV_PIX_FMT_RGBA64
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
#define avpriv_request_sample(...)
int h_scount[MAX_COMPONENTS]
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
static int mjpeg_decode_com(MJpegDecodeContext *s)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
int qscale[4]
quantizer scale calculated from quant_matrixes
#define FF_CODEC_PROPERTY_LOSSLESS
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
int dc_index[MAX_COMPONENTS]
size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag)
Put a string representing the codec tag codec_tag in buf.
int linesize[MAX_COMPONENTS]
linesize << interlaced
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
MJPEG encoder and decoder.
int comp_index[MAX_COMPONENTS]
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
static void copy_block2(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
#define FF_QSCALE_TYPE_MPEG1
enum AVDiscard skip_frame
Skip decoding for selected frames.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int mjpeg_decode_dri(MJpegDecodeContext *s)
8 bit with AV_PIX_FMT_RGB32 palette
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
uint16_t(* ljpeg_buffer)[4]
unsigned int ljpeg_buffer_size
int16_t quant_matrixes[4][64]
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
uint8_t * last_nnz[MAX_COMPONENTS]
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
int quant_sindex[MAX_COMPONENTS]
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range...
static int get_bits_count(const GetBitContext *s)
int h_count[MAX_COMPONENTS]
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
#define AV_PIX_FMT_YUV444P16
int interlaced_frame
The content of the picture is interlaced.
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
#define AV_PIX_FMT_YUVA420P16
const OptionDef options[]
void av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
#define PREDICT(ret, topleft, top, left, predictor)
static void predictor(uint8_t *src, int size)
static int get_bits_left(GetBitContext *gb)
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
AVDictionary * exif_metadata
#define UPDATE_CACHE(name, gb)
int width
width and height of the video frame
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int flags
Additional information about the frame packing.
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
static const uint16_t mask[17]
#define PTRDIFF_SPECIFIER
#define AV_EF_EXPLODE
abort decoding on minor error detection
int nb_blocks[MAX_COMPONENTS]
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
#define AV_PIX_FMT_YUVA444P16
int flags
AV_CODEC_FLAG_*.
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Video is not stereoscopic (and metadata has to be there).
#define CLOSE_READER(name, gb)
Libavcodec external API header.
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int16_t *quant_matrix, int Al)
#define FF_DEBUG_STARTCODE
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
enum AVPictureType pict_type
Picture type of the frame.
#define AV_PIX_FMT_GBRP16
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define AV_PIX_FMT_GRAY16
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, int16_t *quant_matrix)
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
int component_id[MAX_COMPONENTS]
static int mjpeg_decode_app(MJpegDecodeContext *s)
#define FF_CEIL_RSHIFT(a, b)
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
#define LAST_SKIP_BITS(name, gb, num)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
int v_scount[MAX_COMPONENTS]
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
uint8_t idct_permutation[64]
IDCT input permutation.
packed RGB 8:8:8, 24bpp, BGRBGR...
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
#define SHOW_UBITS(name, gb, num)
the normal 2^n-1 "JPEG" YUV ranges
static const float pred[4]
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
#define AV_PIX_FMT_YUV420P16
static av_always_inline int bytestream2_tell(GetByteContext *g)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
enum AVStereo3DType type
How views are packed within the video.
#define AV_LOG_INFO
Standard information.
AVDictionary ** avpriv_frame_get_metadatap(AVFrame *frame)
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
main external API structure.
uint8_t * data
The data buffer.
static int get_xbits(GetBitContext *s, int n)
read mpeg1 dc style vlc (sign bit + mantissa with no MSB).
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
#define OPEN_READER(name, gb)
int avpriv_exif_decode_ifd(AVCodecContext *avctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Recursively decodes all IFD's and adds included TAGS into the metadata dictionary.
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
const uint8_t avpriv_mjpeg_val_dc[12]
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
static unsigned int get_bits1(GetBitContext *s)
static void init_idct(AVCodecContext *avctx)
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Describe the class of an AVClass context structure.
static void skip_bits(GetBitContext *s, int n)
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
int ac_index[MAX_COMPONENTS]
enum AVColorSpace colorspace
YUV colorspace type.
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define GET_CACHE(name, gb)
uint16_t step_minus1
Number of elements between 2 horizontally consecutive pixels minus 1.
const uint8_t ff_zigzag_direct[64]
uint64_t coefs_finished[MAX_COMPONENTS]
bitmask of which coefs have been completely decoded (progressive mode)
#define AV_PIX_FMT_GBR24P
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
#define CONFIG_JPEGLS_DECODER
static const uint8_t start_code[]
Views are on top of each other.
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
JPEG-LS extension parameters.
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define FF_DEBUG_PICT_INFO
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
the normal 219*2^(n-8) "MPEG" YUV ranges
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Views are next to each other.
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
A reference to a data buffer.
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static void build_basic_mjpeg_vlc(MJpegDecodeContext *s)
static void copy_mb(CinepakEncContext *s, AVPicture *a, AVPicture *b)
planar GBRA 4:4:4:4 32bpp
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, int16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
int top_field_first
If the content is interlaced, is top field displayed first.
int got_picture
we found a SOF and picture is valid, too.
const uint8_t avpriv_mjpeg_val_ac_luminance[]
int16_t(*[MAX_COMPONENTS] blocks)[64]
intermediate sums (progressive mode)
static void copy_block4(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
VLC_TYPE(* table)[2]
code, bits
int key_frame
1 -> keyframe, 0-> not
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
int last_dc[MAX_COMPONENTS]
static const uint8_t * align_get_bits(GetBitContext *s)
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, int16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
static void * av_mallocz_array(size_t nmemb, size_t size)
static void decode_flush(AVCodecContext *avctx)
int frame_number
Frame counter, set by libavcodec.
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
enum AVFieldOrder field_order
Field order.
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
mpeg1 4:2:0, jpeg 4:2:0, h263 4:2:0
#define FFSWAP(type, a, b)
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
#define MKTAG(a, b, c, d)
This structure stores compressed data.
void ff_free_vlc(VLC *vlc)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define AV_PIX_FMT_YUV422P16