FFmpeg  3.4.9
huffyuvenc.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
5  * the algorithm used
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  *
23  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
24  */
25 
26 /**
27  * @file
28  * huffyuv encoder
29  */
30 
31 #include "avcodec.h"
32 #include "huffyuv.h"
33 #include "huffman.h"
34 #include "huffyuvencdsp.h"
35 #include "internal.h"
36 #include "lossless_videoencdsp.h"
37 #include "put_bits.h"
38 #include "libavutil/opt.h"
39 #include "libavutil/pixdesc.h"
40 
41 static inline void diff_bytes(HYuvContext *s, uint8_t *dst,
42  const uint8_t *src0, const uint8_t *src1, int w)
43 {
44  if (s->bps <= 8) {
45  s->llvidencdsp.diff_bytes(dst, src0, src1, w);
46  } else {
47  s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w);
48  }
49 }
50 
51 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
52  const uint8_t *src, int w, int left)
53 {
54  int i;
55  if (s->bps <= 8) {
56  if (w < 32) {
57  for (i = 0; i < w; i++) {
58  const int temp = src[i];
59  dst[i] = temp - left;
60  left = temp;
61  }
62  return left;
63  } else {
64  for (i = 0; i < 32; i++) {
65  const int temp = src[i];
66  dst[i] = temp - left;
67  left = temp;
68  }
69  s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
70  return src[w-1];
71  }
72  } else {
73  const uint16_t *src16 = (const uint16_t *)src;
74  uint16_t *dst16 = ( uint16_t *)dst;
75  if (w < 32) {
76  for (i = 0; i < w; i++) {
77  const int temp = src16[i];
78  dst16[i] = temp - left;
79  left = temp;
80  }
81  return left;
82  } else {
83  for (i = 0; i < 16; i++) {
84  const int temp = src16[i];
85  dst16[i] = temp - left;
86  left = temp;
87  }
88  s->hencdsp.diff_int16(dst16 + 16, src16 + 16, src16 + 15, s->n - 1, w - 16);
89  return src16[w-1];
90  }
91  }
92 }
93 
94 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
95  const uint8_t *src, int w,
96  int *red, int *green, int *blue,
97  int *alpha)
98 {
99  int i;
100  int r, g, b, a;
101  r = *red;
102  g = *green;
103  b = *blue;
104  a = *alpha;
105 
106  for (i = 0; i < FFMIN(w, 4); i++) {
107  const int rt = src[i * 4 + R];
108  const int gt = src[i * 4 + G];
109  const int bt = src[i * 4 + B];
110  const int at = src[i * 4 + A];
111  dst[i * 4 + R] = rt - r;
112  dst[i * 4 + G] = gt - g;
113  dst[i * 4 + B] = bt - b;
114  dst[i * 4 + A] = at - a;
115  r = rt;
116  g = gt;
117  b = bt;
118  a = at;
119  }
120 
121  s->llvidencdsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
122 
123  *red = src[(w - 1) * 4 + R];
124  *green = src[(w - 1) * 4 + G];
125  *blue = src[(w - 1) * 4 + B];
126  *alpha = src[(w - 1) * 4 + A];
127 }
128 
129 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
130  uint8_t *src, int w,
131  int *red, int *green, int *blue)
132 {
133  int i;
134  int r, g, b;
135  r = *red;
136  g = *green;
137  b = *blue;
138  for (i = 0; i < FFMIN(w, 16); i++) {
139  const int rt = src[i * 3 + 0];
140  const int gt = src[i * 3 + 1];
141  const int bt = src[i * 3 + 2];
142  dst[i * 3 + 0] = rt - r;
143  dst[i * 3 + 1] = gt - g;
144  dst[i * 3 + 2] = bt - b;
145  r = rt;
146  g = gt;
147  b = bt;
148  }
149 
150  s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
151 
152  *red = src[(w - 1) * 3 + 0];
153  *green = src[(w - 1) * 3 + 1];
154  *blue = src[(w - 1) * 3 + 2];
155 }
156 
157 static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
158 {
159  if (s->bps <= 8) {
160  s->llvidencdsp.sub_median_pred(dst, src1, src2, w , left, left_top);
161  } else {
162  s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top);
163  }
164 }
165 
167 {
168  int i;
169  int index = 0;
170  int n = s->vlc_n;
171 
172  for (i = 0; i < n;) {
173  int val = len[i];
174  int repeat = 0;
175 
176  for (; i < n && len[i] == val && repeat < 255; i++)
177  repeat++;
178 
179  av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0);
180  if (repeat > 7) {
181  buf[index++] = val;
182  buf[index++] = repeat;
183  } else {
184  buf[index++] = val | (repeat << 5);
185  }
186  }
187 
188  return index;
189 }
190 
192 {
193  int i, ret;
194  int size = 0;
195  int count = 3;
196 
197  if (s->version > 2)
198  count = 1 + s->alpha + 2*s->chroma;
199 
200  for (i = 0; i < count; i++) {
201  if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
202  return ret;
203 
204  if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
205  return -1;
206  }
207 
208  size += store_table(s, s->len[i], buf + size);
209  }
210  return size;
211 }
212 
214 {
215  HYuvContext *s = avctx->priv_data;
216  int i, j;
217  int ret;
219 
220  ff_huffyuv_common_init(avctx);
221  ff_huffyuvencdsp_init(&s->hencdsp, avctx);
223 
224  avctx->extradata = av_mallocz(3*MAX_N + 4);
225  if (s->flags&AV_CODEC_FLAG_PASS1) {
226 #define STATS_OUT_SIZE 21*MAX_N*3 + 4
227  avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
228  if (!avctx->stats_out)
229  return AVERROR(ENOMEM);
230  }
231  s->version = 2;
232 
233  if (!avctx->extradata)
234  return AVERROR(ENOMEM);
235 
236 #if FF_API_CODED_FRAME
239  avctx->coded_frame->key_frame = 1;
241 #endif
242 #if FF_API_PRIVATE_OPT
244  if (avctx->context_model == 1)
245  s->context = avctx->context_model;
247 #endif
248 
249  s->bps = desc->comp[0].depth;
250  s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
251  s->chroma = desc->nb_components > 2;
252  s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
254  &s->chroma_h_shift,
255  &s->chroma_v_shift);
256 
257  switch (avctx->pix_fmt) {
258  case AV_PIX_FMT_YUV420P:
259  case AV_PIX_FMT_YUV422P:
260  if (s->width & 1) {
261  av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
262  return AVERROR(EINVAL);
263  }
264  s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
265  break;
266  case AV_PIX_FMT_YUV444P:
267  case AV_PIX_FMT_YUV410P:
268  case AV_PIX_FMT_YUV411P:
269  case AV_PIX_FMT_YUV440P:
270  case AV_PIX_FMT_GBRP:
271  case AV_PIX_FMT_GBRP9:
272  case AV_PIX_FMT_GBRP10:
273  case AV_PIX_FMT_GBRP12:
274  case AV_PIX_FMT_GBRP14:
275  case AV_PIX_FMT_GBRP16:
276  case AV_PIX_FMT_GRAY8:
277  case AV_PIX_FMT_GRAY16:
278  case AV_PIX_FMT_YUVA444P:
279  case AV_PIX_FMT_YUVA420P:
280  case AV_PIX_FMT_YUVA422P:
281  case AV_PIX_FMT_GBRAP:
282  case AV_PIX_FMT_YUV420P9:
287  case AV_PIX_FMT_YUV422P9:
292  case AV_PIX_FMT_YUV444P9:
306  s->version = 3;
307  break;
308  case AV_PIX_FMT_RGB32:
309  s->bitstream_bpp = 32;
310  break;
311  case AV_PIX_FMT_RGB24:
312  s->bitstream_bpp = 24;
313  break;
314  default:
315  av_log(avctx, AV_LOG_ERROR, "format not supported\n");
316  return AVERROR(EINVAL);
317  }
318  s->n = 1<<s->bps;
319  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
320 
322  s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
323 #if FF_API_PRIVATE_OPT
325  if (avctx->prediction_method)
326  s->predictor = avctx->prediction_method;
328 #endif
329  s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
330  if (s->context) {
332  av_log(avctx, AV_LOG_ERROR,
333  "context=1 is not compatible with "
334  "2 pass huffyuv encoding\n");
335  return AVERROR(EINVAL);
336  }
337  }
338 
339  if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
340  if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
341  av_log(avctx, AV_LOG_ERROR,
342  "Error: YV12 is not supported by huffyuv; use "
343  "vcodec=ffvhuff or format=422p\n");
344  return AVERROR(EINVAL);
345  }
346 #if FF_API_PRIVATE_OPT
347  if (s->context) {
348  av_log(avctx, AV_LOG_ERROR,
349  "Error: per-frame huffman tables are not supported "
350  "by huffyuv; use vcodec=ffvhuff\n");
351  return AVERROR(EINVAL);
352  }
353  if (s->version > 2) {
354  av_log(avctx, AV_LOG_ERROR,
355  "Error: ver>2 is not supported "
356  "by huffyuv; use vcodec=ffvhuff\n");
357  return AVERROR(EINVAL);
358  }
359 #endif
360  if (s->interlaced != ( s->height > 288 ))
361  av_log(avctx, AV_LOG_INFO,
362  "using huffyuv 2.2.0 or newer interlacing flag\n");
363  }
364 
366  av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
367  "Use vstrict=-2 / -strict -2 to use it anyway.\n");
368  return AVERROR(EINVAL);
369  }
370 
371  if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
372  av_log(avctx, AV_LOG_ERROR,
373  "Error: RGB is incompatible with median predictor\n");
374  return AVERROR(EINVAL);
375  }
376 
377  ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
378  ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
379  if (s->context)
380  ((uint8_t*)avctx->extradata)[2] |= 0x40;
381  if (s->version < 3) {
382  ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
383  ((uint8_t*)avctx->extradata)[3] = 0;
384  } else {
385  ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
386  if (s->chroma)
387  ((uint8_t*)avctx->extradata)[2] |= s->yuv ? 1 : 2;
388  if (s->alpha)
389  ((uint8_t*)avctx->extradata)[2] |= 4;
390  ((uint8_t*)avctx->extradata)[3] = 1;
391  }
392  s->avctx->extradata_size = 4;
393 
394  if (avctx->stats_in) {
395  char *p = avctx->stats_in;
396 
397  for (i = 0; i < 4; i++)
398  for (j = 0; j < s->vlc_n; j++)
399  s->stats[i][j] = 1;
400 
401  for (;;) {
402  for (i = 0; i < 4; i++) {
403  char *next;
404 
405  for (j = 0; j < s->vlc_n; j++) {
406  s->stats[i][j] += strtol(p, &next, 0);
407  if (next == p) return -1;
408  p = next;
409  }
410  }
411  if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
412  }
413  } else {
414  for (i = 0; i < 4; i++)
415  for (j = 0; j < s->vlc_n; j++) {
416  int d = FFMIN(j, s->vlc_n - j);
417 
418  s->stats[i][j] = 100000000 / (d*d + 1);
419  }
420  }
421 
423  if (ret < 0)
424  return ret;
425  s->avctx->extradata_size += ret;
426 
427  if (s->context) {
428  for (i = 0; i < 4; i++) {
429  int pels = s->width * s->height / (i ? 40 : 10);
430  for (j = 0; j < s->vlc_n; j++) {
431  int d = FFMIN(j, s->vlc_n - j);
432  s->stats[i][j] = pels/(d*d + 1);
433  }
434  }
435  } else {
436  for (i = 0; i < 4; i++)
437  for (j = 0; j < s->vlc_n; j++)
438  s->stats[i][j]= 0;
439  }
440 
441  if (ff_huffyuv_alloc_temp(s)) {
443  return AVERROR(ENOMEM);
444  }
445 
446  s->picture_number=0;
447 
448  return 0;
449 }
451 {
452  int i;
453  const uint8_t *y = s->temp[0] + offset;
454  const uint8_t *u = s->temp[1] + offset / 2;
455  const uint8_t *v = s->temp[2] + offset / 2;
456 
457  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
458  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
459  return -1;
460  }
461 
462 #define LOAD4\
463  int y0 = y[2 * i];\
464  int y1 = y[2 * i + 1];\
465  int u0 = u[i];\
466  int v0 = v[i];
467 
468  count /= 2;
469 
470  if (s->flags & AV_CODEC_FLAG_PASS1) {
471  for(i = 0; i < count; i++) {
472  LOAD4;
473  s->stats[0][y0]++;
474  s->stats[1][u0]++;
475  s->stats[0][y1]++;
476  s->stats[2][v0]++;
477  }
478  }
480  return 0;
481  if (s->context) {
482  for (i = 0; i < count; i++) {
483  LOAD4;
484  s->stats[0][y0]++;
485  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
486  s->stats[1][u0]++;
487  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
488  s->stats[0][y1]++;
489  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
490  s->stats[2][v0]++;
491  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
492  }
493  } else {
494  for(i = 0; i < count; i++) {
495  LOAD4;
496  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
497  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
498  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
499  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
500  }
501  }
502  return 0;
503 }
504 
506 {
507  int i, count = width/2;
508 
509  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < count * s->bps / 2) {
510  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
511  return -1;
512  }
513 
514 #define LOADEND\
515  int y0 = s->temp[0][width-1];
516 #define LOADEND_14\
517  int y0 = s->temp16[0][width-1] & mask;
518 #define LOADEND_16\
519  int y0 = s->temp16[0][width-1];
520 #define STATEND\
521  s->stats[plane][y0]++;
522 #define STATEND_16\
523  s->stats[plane][y0>>2]++;
524 #define WRITEEND\
525  put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
526 #define WRITEEND_16\
527  put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
528  put_bits(&s->pb, 2, y0&3);
529 
530 #define LOAD2\
531  int y0 = s->temp[0][2 * i];\
532  int y1 = s->temp[0][2 * i + 1];
533 #define LOAD2_14\
534  int y0 = s->temp16[0][2 * i] & mask;\
535  int y1 = s->temp16[0][2 * i + 1] & mask;
536 #define LOAD2_16\
537  int y0 = s->temp16[0][2 * i];\
538  int y1 = s->temp16[0][2 * i + 1];
539 #define STAT2\
540  s->stats[plane][y0]++;\
541  s->stats[plane][y1]++;
542 #define STAT2_16\
543  s->stats[plane][y0>>2]++;\
544  s->stats[plane][y1>>2]++;
545 #define WRITE2\
546  put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
547  put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
548 #define WRITE2_16\
549  put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
550  put_bits(&s->pb, 2, y0&3);\
551  put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
552  put_bits(&s->pb, 2, y1&3);
553 
554  if (s->bps <= 8) {
555  if (s->flags & AV_CODEC_FLAG_PASS1) {
556  for (i = 0; i < count; i++) {
557  LOAD2;
558  STAT2;
559  }
560  if (width&1) {
561  LOADEND;
562  STATEND;
563  }
564  }
566  return 0;
567 
568  if (s->context) {
569  for (i = 0; i < count; i++) {
570  LOAD2;
571  STAT2;
572  WRITE2;
573  }
574  if (width&1) {
575  LOADEND;
576  STATEND;
577  WRITEEND;
578  }
579  } else {
580  for (i = 0; i < count; i++) {
581  LOAD2;
582  WRITE2;
583  }
584  if (width&1) {
585  LOADEND;
586  WRITEEND;
587  }
588  }
589  } else if (s->bps <= 14) {
590  int mask = s->n - 1;
591  if (s->flags & AV_CODEC_FLAG_PASS1) {
592  for (i = 0; i < count; i++) {
593  LOAD2_14;
594  STAT2;
595  }
596  if (width&1) {
597  LOADEND_14;
598  STATEND;
599  }
600  }
602  return 0;
603 
604  if (s->context) {
605  for (i = 0; i < count; i++) {
606  LOAD2_14;
607  STAT2;
608  WRITE2;
609  }
610  if (width&1) {
611  LOADEND_14;
612  STATEND;
613  WRITEEND;
614  }
615  } else {
616  for (i = 0; i < count; i++) {
617  LOAD2_14;
618  WRITE2;
619  }
620  if (width&1) {
621  LOADEND_14;
622  WRITEEND;
623  }
624  }
625  } else {
626  if (s->flags & AV_CODEC_FLAG_PASS1) {
627  for (i = 0; i < count; i++) {
628  LOAD2_16;
629  STAT2_16;
630  }
631  if (width&1) {
632  LOADEND_16;
633  STATEND_16;
634  }
635  }
637  return 0;
638 
639  if (s->context) {
640  for (i = 0; i < count; i++) {
641  LOAD2_16;
642  STAT2_16;
643  WRITE2_16;
644  }
645  if (width&1) {
646  LOADEND_16;
647  STATEND_16;
648  WRITEEND_16;
649  }
650  } else {
651  for (i = 0; i < count; i++) {
652  LOAD2_16;
653  WRITE2_16;
654  }
655  if (width&1) {
656  LOADEND_16;
657  WRITEEND_16;
658  }
659  }
660  }
661 #undef LOAD2
662 #undef STAT2
663 #undef WRITE2
664  return 0;
665 }
666 
668 {
669  int i;
670 
671  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
672  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
673  return -1;
674  }
675 
676 #define LOAD2\
677  int y0 = s->temp[0][2 * i];\
678  int y1 = s->temp[0][2 * i + 1];
679 #define STAT2\
680  s->stats[0][y0]++;\
681  s->stats[0][y1]++;
682 #define WRITE2\
683  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
684  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
685 
686  count /= 2;
687 
688  if (s->flags & AV_CODEC_FLAG_PASS1) {
689  for (i = 0; i < count; i++) {
690  LOAD2;
691  STAT2;
692  }
693  }
695  return 0;
696 
697  if (s->context) {
698  for (i = 0; i < count; i++) {
699  LOAD2;
700  STAT2;
701  WRITE2;
702  }
703  } else {
704  for (i = 0; i < count; i++) {
705  LOAD2;
706  WRITE2;
707  }
708  }
709  return 0;
710 }
711 
712 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
713 {
714  int i;
715 
716  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
717  4 * planes * count) {
718  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
719  return -1;
720  }
721 
722 #define LOAD_GBRA \
723  int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
724  int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
725  int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
726  int a = s->temp[0][planes * i + A];
727 
728 #define STAT_BGRA \
729  s->stats[0][b]++; \
730  s->stats[1][g]++; \
731  s->stats[2][r]++; \
732  if (planes == 4) \
733  s->stats[2][a]++;
734 
735 #define WRITE_GBRA \
736  put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
737  put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
738  put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
739  if (planes == 4) \
740  put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
741 
742  if ((s->flags & AV_CODEC_FLAG_PASS1) &&
744  for (i = 0; i < count; i++) {
745  LOAD_GBRA;
746  STAT_BGRA;
747  }
748  } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
749  for (i = 0; i < count; i++) {
750  LOAD_GBRA;
751  STAT_BGRA;
752  WRITE_GBRA;
753  }
754  } else {
755  for (i = 0; i < count; i++) {
756  LOAD_GBRA;
757  WRITE_GBRA;
758  }
759  }
760  return 0;
761 }
762 
764  const AVFrame *pict, int *got_packet)
765 {
766  HYuvContext *s = avctx->priv_data;
767  const int width = s->width;
768  const int width2 = s->width>>1;
769  const int height = s->height;
770  const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
771  const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
772  const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
773  const AVFrame * const p = pict;
774  int i, j, size = 0, ret;
775 
776  if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
777  return ret;
778 
779  if (s->context) {
780  size = store_huffman_tables(s, pkt->data);
781  if (size < 0)
782  return size;
783 
784  for (i = 0; i < 4; i++)
785  for (j = 0; j < s->vlc_n; j++)
786  s->stats[i][j] >>= 1;
787  }
788 
789  init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
790 
791  if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
792  avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
793  int lefty, leftu, leftv, y, cy;
794 
795  put_bits(&s->pb, 8, leftv = p->data[2][0]);
796  put_bits(&s->pb, 8, lefty = p->data[0][1]);
797  put_bits(&s->pb, 8, leftu = p->data[1][0]);
798  put_bits(&s->pb, 8, p->data[0][0]);
799 
800  lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
801  leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
802  leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
803 
804  encode_422_bitstream(s, 2, width-2);
805 
806  if (s->predictor==MEDIAN) {
807  int lefttopy, lefttopu, lefttopv;
808  cy = y = 1;
809  if (s->interlaced) {
810  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
811  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
812  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
813 
814  encode_422_bitstream(s, 0, width);
815  y++; cy++;
816  }
817 
818  lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
819  leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
820  leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
821 
822  encode_422_bitstream(s, 0, 4);
823 
824  lefttopy = p->data[0][3];
825  lefttopu = p->data[1][1];
826  lefttopv = p->data[2][1];
827  s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
828  s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
829  s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
830  encode_422_bitstream(s, 0, width - 4);
831  y++; cy++;
832 
833  for (; y < height; y++,cy++) {
834  uint8_t *ydst, *udst, *vdst;
835 
836  if (s->bitstream_bpp == 12) {
837  while (2 * cy > y) {
838  ydst = p->data[0] + p->linesize[0] * y;
839  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
840  encode_gray_bitstream(s, width);
841  y++;
842  }
843  if (y >= height) break;
844  }
845  ydst = p->data[0] + p->linesize[0] * y;
846  udst = p->data[1] + p->linesize[1] * cy;
847  vdst = p->data[2] + p->linesize[2] * cy;
848 
849  s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
850  s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
851  s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
852 
853  encode_422_bitstream(s, 0, width);
854  }
855  } else {
856  for (cy = y = 1; y < height; y++, cy++) {
857  uint8_t *ydst, *udst, *vdst;
858 
859  /* encode a luma only line & y++ */
860  if (s->bitstream_bpp == 12) {
861  ydst = p->data[0] + p->linesize[0] * y;
862 
863  if (s->predictor == PLANE && s->interlaced < y) {
864  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
865 
866  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
867  } else {
868  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
869  }
870  encode_gray_bitstream(s, width);
871  y++;
872  if (y >= height) break;
873  }
874 
875  ydst = p->data[0] + p->linesize[0] * y;
876  udst = p->data[1] + p->linesize[1] * cy;
877  vdst = p->data[2] + p->linesize[2] * cy;
878 
879  if (s->predictor == PLANE && s->interlaced < cy) {
880  s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
881  s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
882  s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
883 
884  lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
885  leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
886  leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
887  } else {
888  lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
889  leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
890  leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
891  }
892 
893  encode_422_bitstream(s, 0, width);
894  }
895  }
896  } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
897  uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
898  const int stride = -p->linesize[0];
899  const int fake_stride = -fake_ystride;
900  int y;
901  int leftr, leftg, leftb, lefta;
902 
903  put_bits(&s->pb, 8, lefta = data[A]);
904  put_bits(&s->pb, 8, leftr = data[R]);
905  put_bits(&s->pb, 8, leftg = data[G]);
906  put_bits(&s->pb, 8, leftb = data[B]);
907 
908  sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
909  &leftr, &leftg, &leftb, &lefta);
910  encode_bgra_bitstream(s, width - 1, 4);
911 
912  for (y = 1; y < s->height; y++) {
913  uint8_t *dst = data + y*stride;
914  if (s->predictor == PLANE && s->interlaced < y) {
915  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
916  sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
917  &leftr, &leftg, &leftb, &lefta);
918  } else {
919  sub_left_prediction_bgr32(s, s->temp[0], dst, width,
920  &leftr, &leftg, &leftb, &lefta);
921  }
922  encode_bgra_bitstream(s, width, 4);
923  }
924  } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
925  uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
926  const int stride = -p->linesize[0];
927  const int fake_stride = -fake_ystride;
928  int y;
929  int leftr, leftg, leftb;
930 
931  put_bits(&s->pb, 8, leftr = data[0]);
932  put_bits(&s->pb, 8, leftg = data[1]);
933  put_bits(&s->pb, 8, leftb = data[2]);
934  put_bits(&s->pb, 8, 0);
935 
936  sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
937  &leftr, &leftg, &leftb);
938  encode_bgra_bitstream(s, width-1, 3);
939 
940  for (y = 1; y < s->height; y++) {
941  uint8_t *dst = data + y * stride;
942  if (s->predictor == PLANE && s->interlaced < y) {
943  s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
944  width * 3);
945  sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
946  &leftr, &leftg, &leftb);
947  } else {
948  sub_left_prediction_rgb24(s, s->temp[0], dst, width,
949  &leftr, &leftg, &leftb);
950  }
951  encode_bgra_bitstream(s, width, 3);
952  }
953  } else if (s->version > 2) {
954  int plane;
955  for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
956  int left, y;
957  int w = width;
958  int h = height;
959  int fake_stride = fake_ystride;
960 
961  if (s->chroma && (plane == 1 || plane == 2)) {
962  w >>= s->chroma_h_shift;
963  h >>= s->chroma_v_shift;
964  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
965  }
966 
967  left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);
968 
969  encode_plane_bitstream(s, w, plane);
970 
971  if (s->predictor==MEDIAN) {
972  int lefttop;
973  y = 1;
974  if (s->interlaced) {
975  left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);
976 
977  encode_plane_bitstream(s, w, plane);
978  y++;
979  }
980 
981  lefttop = p->data[plane][0];
982 
983  for (; y < h; y++) {
984  uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
985 
986  sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);
987 
988  encode_plane_bitstream(s, w, plane);
989  }
990  } else {
991  for (y = 1; y < h; y++) {
992  uint8_t *dst = p->data[plane] + p->linesize[plane] * y;
993 
994  if (s->predictor == PLANE && s->interlaced < y) {
995  diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);
996 
997  left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
998  } else {
999  left = sub_left_prediction(s, s->temp[0], dst, w , left);
1000  }
1001 
1002  encode_plane_bitstream(s, w, plane);
1003  }
1004  }
1005  }
1006  } else {
1007  av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1008  }
1009  emms_c();
1010 
1011  size += (put_bits_count(&s->pb) + 31) / 8;
1012  put_bits(&s->pb, 16, 0);
1013  put_bits(&s->pb, 15, 0);
1014  size /= 4;
1015 
1016  if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1017  int j;
1018  char *p = avctx->stats_out;
1019  char *end = p + STATS_OUT_SIZE;
1020  for (i = 0; i < 4; i++) {
1021  for (j = 0; j < s->vlc_n; j++) {
1022  snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1023  p += strlen(p);
1024  s->stats[i][j]= 0;
1025  }
1026  snprintf(p, end-p, "\n");
1027  p++;
1028  if (end <= p)
1029  return AVERROR(ENOMEM);
1030  }
1031  } else if (avctx->stats_out)
1032  avctx->stats_out[0] = '\0';
1033  if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
1034  flush_put_bits(&s->pb);
1035  s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
1036  }
1037 
1038  s->picture_number++;
1039 
1040  pkt->size = size * 4;
1041  pkt->flags |= AV_PKT_FLAG_KEY;
1042  *got_packet = 1;
1043 
1044  return 0;
1045 }
1046 
1048 {
1049  HYuvContext *s = avctx->priv_data;
1050 
1052 
1053  av_freep(&avctx->extradata);
1054  av_freep(&avctx->stats_out);
1055 
1056  return 0;
1057 }
1058 
1059 #define OFFSET(x) offsetof(HYuvContext, x)
1060 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1061 
1062 #define COMMON_OPTIONS \
1063  { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
1064  OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 1 }, \
1065  0, 1, VE }, \
1066  { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
1067  { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT }, INT_MIN, INT_MAX, VE, "pred" }, \
1068  { "plane", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE }, INT_MIN, INT_MAX, VE, "pred" }, \
1069  { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \
1070 
1071 static const AVOption normal_options[] = {
1073  { NULL },
1074 };
1075 
1076 static const AVOption ff_options[] = {
1078  { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
1079  { NULL },
1080 };
1081 
1082 static const AVClass normal_class = {
1083  .class_name = "huffyuv",
1084  .item_name = av_default_item_name,
1085  .option = normal_options,
1086  .version = LIBAVUTIL_VERSION_INT,
1087 };
1088 
1089 static const AVClass ff_class = {
1090  .class_name = "ffvhuff",
1091  .item_name = av_default_item_name,
1092  .option = ff_options,
1093  .version = LIBAVUTIL_VERSION_INT,
1094 };
1095 
1097  .name = "huffyuv",
1098  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1099  .type = AVMEDIA_TYPE_VIDEO,
1100  .id = AV_CODEC_ID_HUFFYUV,
1101  .priv_data_size = sizeof(HYuvContext),
1102  .init = encode_init,
1103  .encode2 = encode_frame,
1104  .close = encode_end,
1106  .priv_class = &normal_class,
1107  .pix_fmts = (const enum AVPixelFormat[]){
1110  },
1111  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
1113 };
1114 
#if CONFIG_FFVHUFF_ENCODER
/* NOTE(review): .capabilities, the pixel format list and
 * FF_CODEC_CAP_INIT_CLEANUP were missing from this extracted copy; the list
 * below was reconstructed from the FFmpeg 3.4 sources (it must match the
 * formats accepted by encode_init()) — verify against upstream. */
AVCodec ff_ffvhuff_encoder = {
    .name             = "ffvhuff",
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_FFVHUFF,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = encode_init,
    .encode2          = encode_frame,
    .close            = encode_end,
    .capabilities     = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
    .priv_class       = &ff_class,
    .pix_fmts         = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
        AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12,
        AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_NONE
    },
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:938
int plane
Definition: avisynth_c.h:422
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2986
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1770
const char const char void * val
Definition: avisynth_c.h:771
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:422
const char * s
Definition: avisynth_c.h:768
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:416
int size
static float alpha(float a)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2419
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
static av_cold int encode_init(AVCodecContext *avctx)
Definition: huffyuvenc.c:213
AVOption.
Definition: opt.h:246
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:418
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:393
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:419
int bitstream_bpp
Definition: huffyuv.h:63
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:206
#define LIBAVUTIL_VERSION_INT
Definition: version.h:86
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
#define STATEND
else temp
Definition: vf_mcdeint.c:256
const char * g
Definition: vf_curves.c:112
Definition: vf_geq.c:47
const char * desc
Definition: nvenc.c:60
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:184
int size
Definition: avcodec.h:1680
const char * b
Definition: vf_curves.c:113
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:399
static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvenc.c:505
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:387
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2939
#define MAX_VLC_N
Definition: huffyuv.h:47
int context
Definition: huffyuv.h:77
static AVPacket pkt
#define src
Definition: vp8dsp.c:254
LLVidEncDSPContext llvidencdsp
Definition: huffyuv.h:93
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:3739
int height
Definition: huffyuv.h:75
#define AV_CODEC_CAP_INTRA_ONLY
Codec is intra only.
Definition: avcodec.h:1095
#define LOAD_GBRA
av_cold void ff_huffyuvencdsp_init(HuffYUVEncDSPContext *c, AVCodecContext *avctx)
Definition: huffyuvencdsp.c:71
av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:106
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:82
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:173
AVOptions.
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
#define STATEND_16
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int bps
Definition: huffyuv.h:67
#define emms_c()
Definition: internal.h:54
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1876
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:415
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:398
static void sub_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
Definition: huffyuvenc.c:157
void(* diff_bytes)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w)
const char data[16]
Definition: mxf.c:90
#define height
uint8_t * data
Definition: avcodec.h:1679
attribute_deprecated int context_model
Definition: avcodec.h:2821
#define STATS_OUT_SIZE
int vlc_n
Definition: huffyuv.h:69
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:396
static void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue)
Definition: huffyuvenc.c:129
void(* sub_hfyu_median_pred_int16)(uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w, int *left, int *left_top)
Definition: huffyuvencdsp.h:32
int chroma_h_shift
Definition: huffyuv.h:73
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:3157
#define LOAD2
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:388
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2931
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:421
#define A(x)
Definition: vp56_arith.h:28
#define AV_INPUT_BUFFER_MIN_SIZE
minimum encoding buffer size Used to avoid some checks during header writing.
Definition: avcodec.h:784
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1711
uint8_t len[4][MAX_VLC_N]
Definition: huffyuv.h:83
#define MAX_N
Definition: huffyuv.h:46
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
Definition: huffyuvenc.c:763
#define LOAD2_14
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
enum AVCodecID id
Definition: avcodec.h:3753
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:192
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
void(* sub_median_pred)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w, int *left, int *left_top)
Subtract HuffYUV&#39;s variant of median prediction.
int chroma_v_shift
Definition: huffyuv.h:74
Definition: huffyuv.h:51
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
static const uint16_t mask[17]
Definition: lzw.c:38
av_default_item_name
#define AVERROR(e)
Definition: error.h:43
int flags
Definition: huffyuv.h:76
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:148
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2447
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:181
const char * r
Definition: vf_curves.c:111
static const AVClass ff_class
Definition: huffyuvenc.c:1089
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:423
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1856
uint8_t * buf
Definition: put_bits.h:38
uint16_t width
Definition: gdv.c:47
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
const char * name
Name of the codec implementation.
Definition: avcodec.h:3746
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:386
static const AVClass normal_class
Definition: huffyuvenc.c:1082
int chroma
Definition: huffyuv.h:71
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
#define COMMON_OPTIONS
Definition: huffyuvenc.c:1062
huffyuv codec for libavcodec.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1065
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1685
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
#define WRITE_GBRA
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:381
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
#define WRITE2
int decorrelate
Definition: huffyuv.h:62
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:284
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:402
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:367
#define FFMIN(a, b)
Definition: common.h:96
int width
Definition: huffyuv.h:75
AVCodec ff_huffyuv_encoder
Definition: huffyuvenc.c:1096
void(* diff_int16)(uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w)
Definition: huffyuvencdsp.h:27
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:892
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
int n
Definition: avisynth_c.h:684
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:420
uint8_t * temp[3]
Definition: huffyuv.h:80
static av_cold int encode_end(AVCodecContext *avctx)
Definition: huffyuvenc.c:1047
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:382
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:401
int alpha
Definition: huffyuv.h:70
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:394
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:391
int picture_number
Definition: huffyuv.h:78
int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
Definition: huffman.c:58
static int sub_left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int left)
Definition: huffyuvenc.c:51
Libavcodec external API header.
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
Definition: huffyuvenc.c:450
attribute_deprecated int prediction_method
Definition: avcodec.h:2152
int yuv
Definition: huffyuv.h:72
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
static const AVOption ff_options[]
Definition: huffyuvenc.c:1076
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:193
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
main external API structure.
Definition: avcodec.h:1761
Definition: vf_geq.c:47
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:357
#define STAT2
uint8_t * buf_end
Definition: put_bits.h:38
void * buf
Definition: avisynth_c.h:690
int interlaced
Definition: huffyuv.h:61
int extradata_size
Definition: avcodec.h:1877
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:383
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
Describe the class of an AVClass context structure.
Definition: log.h:67
int index
Definition: gxfenc.c:89
#define WRITEEND
Definition: vf_geq.c:47
huffman tree builder and VLC generator
#define STAT2_16
#define src0
Definition: h264pred.c:138
static int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
Definition: huffyuvenc.c:712
#define LOAD4
#define STAT_BGRA
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:380
#define snprintf
Definition: snprintf.h:34
#define u(width,...)
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:392
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:400
HuffYUVEncDSPContext hencdsp
Definition: huffyuv.h:91
int version
Definition: huffyuv.h:64
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:384
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:390
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
Predictor predictor
Definition: huffyuv.h:58
static void diff_bytes(HYuvContext *s, uint8_t *dst, const uint8_t *src0, const uint8_t *src1, int w)
Definition: huffyuvenc.c:41
#define v0
Definition: regdef.h:26
AVCodecContext * avctx
Definition: huffyuv.h:57
#define WRITE2_16
PutBitContext pb
Definition: huffyuv.h:60
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
Y , 8bpp.
Definition: pixfmt.h:74
Definition: huffyuv.h:52
#define OFFSET(x)
Definition: huffyuvenc.c:1059
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
Definition: huffyuvenc.c:191
#define VE
Definition: huffyuvenc.c:1060
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:233
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
Definition: huffyuvenc.c:166
#define LOAD2_16
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
Definition: avcodec.h:948
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:417
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:3183
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
static int encode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvenc.c:667
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
static void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha)
Definition: huffyuvenc.c:94
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:896
void * priv_data
Definition: avcodec.h:1803
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int len
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:279
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1863
#define WRITEEND_16
#define LOADEND_16
#define av_freep(p)
void INT64 INT64 count
Definition: avisynth_c.h:690
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:104
#define LOADEND
uint32_t bits[4][MAX_VLC_N]
Definition: huffyuv.h:84
uint64_t stats[4][MAX_VLC_N]
Definition: huffyuv.h:82
static const AVOption normal_options[]
Definition: huffyuvenc.c:1071
int depth
Number of bits in the component.
Definition: pixdesc.h:58
#define LOADEND_14
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1656
BswapDSPContext bdsp
Definition: huffyuv.h:89
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:144
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2981
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:395
for(j=16;j >0;--j)
bitstream writer API