FFmpeg  2.8.17
h264.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/display.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/stereo3d.h"
35 #include "libavutil/timer.h"
36 #include "internal.h"
37 #include "cabac.h"
38 #include "cabac_functions.h"
39 #include "error_resilience.h"
40 #include "avcodec.h"
41 #include "h264.h"
42 #include "h264data.h"
43 #include "h264chroma.h"
44 #include "h264_mvpred.h"
45 #include "golomb.h"
46 #include "mathops.h"
47 #include "me_cmp.h"
48 #include "mpegutils.h"
49 #include "rectangle.h"
50 #include "svq3.h"
51 #include "thread.h"
52 #include "vdpau_compat.h"
53 
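/* Residual sample counts per macroblock, indexed by chroma_format_idc:
 * 256 = 16x16 luma only (monochrome), 384 = + two 8x8 chroma blocks (4:2:0),
 * 512 = + two 8x16 chroma blocks (4:2:2), 768 = three full 16x16 planes (4:4:4). */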
54 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
55 
56 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
57 {
58  H264Context *h = avctx->priv_data;
59  return h ? h->sps.num_reorder_frames : 0;
60 }
61 
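/* Error-concealment callback installed as ERContext.decode_mb (see the ER init
 * in the slice-context setup below): the shared error-resilience code hands it a
 * guessed reference index and motion vector for a damaged macroblock; they are
 * written into the slice caches and the macroblock is then reconstructed. */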
62 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
63  int (*mv)[2][4][2],
64  int mb_x, int mb_y, int mb_intra, int mb_skipped)
65 {
66  H264Context *h = opaque;
67  H264SliceContext *sl = &h->slice_ctx[0];
68 
69  sl->mb_x = mb_x;
70  sl->mb_y = mb_y;
71  sl->mb_xy = mb_x + mb_y * h->mb_stride;
72  memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
73  av_assert1(ref >= 0);
74  /* FIXME: It is possible albeit uncommon that slice references
75  * differ between slices. We take the easy approach and ignore
76  * it for now. If this turns out to have any relevance in
77  * practice then correct remapping should be added. */
78  if (ref >= sl->ref_count[0])
79  ref = 0;
80  if (!sl->ref_list[0][ref].data[0]) {
81  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
82  ref = 0;
83  }
84  if ((sl->ref_list[0][ref].reference&3) != 3) {
85  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
86  return;
87  }
88  fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
89  2, 2, 2, ref, 1);
90  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
91  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
92  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
93  sl->mb_mbaff =
94  sl->mb_field_decoding_flag = 0;
95  ff_h264_hl_decode_mb(h, sl);
96 }
97 
98 void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
99  int y, int height)
100 {
101  AVCodecContext *avctx = h->avctx;
102  const AVFrame *src = h->cur_pic.f;
103  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
104  int vshift = desc->log2_chroma_h;
105  const int field_pic = h->picture_structure != PICT_FRAME;
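/* For field pictures the caller passes y/height in field lines, so they are
 * doubled to frame-line coordinates before being clipped to the display height. */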
106  if (field_pic) {
107  height <<= 1;
108  y <<= 1;
109  }
110 
111  height = FFMIN(height, avctx->height - y);
112 
113  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
114  return;
115 
116  if (avctx->draw_horiz_band) {
117  int offset[AV_NUM_DATA_POINTERS];
118  int i;
119 
120  offset[0] = y * src->linesize[0];
121  offset[1] =
122  offset[2] = (y >> vshift) * src->linesize[1];
123  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
124  offset[i] = 0;
125 
126  emms_c();
127 
128  avctx->draw_horiz_band(avctx, src, offset,
129  y, h->picture_structure, height);
130  }
131 }
132 
133 /**
134  * Check if the top & left blocks are available if needed and
135  * change the dc mode so it only uses the available blocks.
136  */
137 int ff_h264_check_intra4x4_pred_mode(const H264Context *h, H264SliceContext *sl)
138 {
139  static const int8_t top[12] = {
140  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
141  };
142  static const int8_t left[12] = {
143  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
144  };
145  int i;
146 
147  if (!(sl->top_samples_available & 0x8000)) {
148  for (i = 0; i < 4; i++) {
149  int status = top[sl->intra4x4_pred_mode_cache[scan8[0] + i]];
150  if (status < 0) {
152  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
153  status, sl->mb_x, sl->mb_y);
154  return AVERROR_INVALIDDATA;
155  } else if (status) {
156  sl->intra4x4_pred_mode_cache[scan8[0] + i] = status;
157  }
158  }
159  }
160 
161  if ((sl->left_samples_available & 0x8888) != 0x8888) {
162  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
163  for (i = 0; i < 4; i++)
164  if (!(sl->left_samples_available & mask[i])) {
165  int status = left[sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
166  if (status < 0) {
168  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
169  status, sl->mb_x, sl->mb_y);
170  return AVERROR_INVALIDDATA;
171  } else if (status) {
172  sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
173  }
174  }
175  }
176 
177  return 0;
178 } // FIXME cleanup like ff_h264_check_intra_pred_mode
179 
180 /**
181  * Check if the top & left blocks are available if needed and
182  * change the dc mode so it only uses the available blocks.
183  */
184 int ff_h264_check_intra_pred_mode(const H264Context *h, H264SliceContext *sl,
185  int mode, int is_chroma)
186 {
187  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
188  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
189 
190  if (mode > 3U) {
192  "out of range intra chroma pred mode at %d %d\n",
193  sl->mb_x, sl->mb_y);
194  return AVERROR_INVALIDDATA;
195  }
196 
197  if (!(sl->top_samples_available & 0x8000)) {
198  mode = top[mode];
199  if (mode < 0) {
201  "top block unavailable for requested intra mode at %d %d\n",
202  sl->mb_x, sl->mb_y);
203  return AVERROR_INVALIDDATA;
204  }
205  }
206 
207  if ((sl->left_samples_available & 0x8080) != 0x8080) {
208  mode = left[mode];
209  if (mode < 0) {
211  "left block unavailable for requested intra mode at %d %d\n",
212  sl->mb_x, sl->mb_y);
213  return AVERROR_INVALIDDATA;
214  }
215  if (is_chroma && (sl->left_samples_available & 0x8080)) {
216  // mad cow disease mode, aka MBAFF + constrained_intra_pred
217  mode = ALZHEIMER_DC_L0T_PRED8x8 +
218  (!(sl->left_samples_available & 0x8000)) +
219  2 * (mode == DC_128_PRED8x8);
220  }
221  }
222 
223  return mode;
224 }
225 
226 const uint8_t *ff_h264_decode_nal(H264Context *h, H264SliceContext *sl,
227  const uint8_t *src,
228  int *dst_length, int *consumed, int length)
229 {
230  int i, si, di;
231  uint8_t *dst;
232 
233  // src[0]&0x80; // forbidden bit
234  h->nal_ref_idc = src[0] >> 5;
235  h->nal_unit_type = src[0] & 0x1F;
236 
237  src++;
238  length--;
239 
240 #define STARTCODE_TEST \
241  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
242  if (src[i + 2] != 3 && src[i + 2] != 0) { \
243  /* startcode, so we must be past the end */ \
244  length = i; \
245  } \
246  break; \
247  }
248 
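/* The scans below look for emulation-prevention escapes: the encoder turns any
 * 00 00 0x (x <= 3) payload pattern into 00 00 03 0x, so the decoder has to drop
 * the 03 again (00 00 03 01 -> 00 00 01). The word-wise variants only locate a
 * zero byte quickly; STARTCODE_TEST then decides whether it is an escape or an
 * unescaped start code, which marks the end of this NAL unit. */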
249 #if HAVE_FAST_UNALIGNED
250 #define FIND_FIRST_ZERO \
251  if (i > 0 && !src[i]) \
252  i--; \
253  while (src[i]) \
254  i++
255 
256 #if HAVE_FAST_64BIT
257  for (i = 0; i + 1 < length; i += 9) {
258  if (!((~AV_RN64A(src + i) &
259  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
260  0x8000800080008080ULL))
261  continue;
262  FIND_FIRST_ZERO;
263  STARTCODE_TEST;
264  i -= 7;
265  }
266 #else
267  for (i = 0; i + 1 < length; i += 5) {
268  if (!((~AV_RN32A(src + i) &
269  (AV_RN32A(src + i) - 0x01000101U)) &
270  0x80008080U))
271  continue;
272  FIND_FIRST_ZERO;
273  STARTCODE_TEST;
274  i -= 3;
275  }
276 #endif
277 #else
278  for (i = 0; i + 1 < length; i += 2) {
279  if (src[i])
280  continue;
281  if (i > 0 && src[i - 1] == 0)
282  i--;
283  STARTCODE_TEST;
284  }
285 #endif
286 
287  av_fast_padded_malloc(&sl->rbsp_buffer, &sl->rbsp_buffer_size, length + MAX_MBPAIR_SIZE);
288  dst = sl->rbsp_buffer;
289 
290  if (!dst)
291  return NULL;
292 
293  if(i>=length-1){ //no escaped 0
294  *dst_length= length;
295  *consumed= length+1; //+1 for the header
296 
297  memcpy(dst, src, length);
298  return dst;
299  }
300 
301  memcpy(dst, src, i);
302  si = di = i;
303  while (si + 2 < length) {
304  // remove escapes (very rare 1:2^22)
305  if (src[si + 2] > 3) {
306  dst[di++] = src[si++];
307  dst[di++] = src[si++];
308  } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
309  if (src[si + 2] == 3) { // escape
310  dst[di++] = 0;
311  dst[di++] = 0;
312  si += 3;
313  continue;
314  } else // next start code
315  goto nsc;
316  }
317 
318  dst[di++] = src[si++];
319  }
320  while (si < length)
321  dst[di++] = src[si++];
322 
323 nsc:
324  memset(dst + di, 0, AV_INPUT_BUFFER_PADDING_SIZE);
325 
326  *dst_length = di;
327  *consumed = si + 1; // +1 for the header
328  /* FIXME store exact number of bits in the getbitcontext
329  * (it is needed for decoding) */
330  return dst;
331 }
332 
333 /**
334  * Identify the exact end of the bitstream
335  * @return the length of the trailing bits, or 0 if damaged
336  */
337 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
338 {
339  int v = *src;
340  int r;
341 
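/* The RBSP ends with a single '1' stop bit followed by zero alignment bits;
 * the first set bit found when scanning the last byte from the LSB up tells
 * how many trailing bits belong to that stop pattern. */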
342  ff_tlog(h->avctx, "rbsp trailing %X\n", v);
343 
344  for (r = 1; r < 9; r++) {
345  if (v & 1)
346  return r;
347  v >>= 1;
348  }
349  return 0;
350 }
351 
352 void ff_h264_free_tables(H264Context *h)
353 {
354  int i;
355 
356  av_freep(&h->intra4x4_pred_mode);
357  av_freep(&h->chroma_pred_mode_table);
358  av_freep(&h->cbp_table);
359  av_freep(&h->mvd_table[0]);
360  av_freep(&h->mvd_table[1]);
361  av_freep(&h->direct_table);
362  av_freep(&h->non_zero_count);
363  av_freep(&h->slice_table_base);
364  h->slice_table = NULL;
365  av_freep(&h->list_counts);
366 
367  av_freep(&h->mb2b_xy);
368  av_freep(&h->mb2br_xy);
369 
370  av_buffer_pool_uninit(&h->qscale_table_pool);
371  av_buffer_pool_uninit(&h->mb_type_pool);
372  av_buffer_pool_uninit(&h->motion_val_pool);
373  av_buffer_pool_uninit(&h->ref_index_pool);
374 
375  for (i = 0; i < h->nb_slice_ctx; i++) {
376  H264SliceContext *sl = &h->slice_ctx[i];
377 
378  av_freep(&sl->dc_val_base);
379  av_freep(&sl->er.mb_index2xy);
380  av_freep(&sl->er.error_status_table);
381  av_freep(&sl->er.er_temp_buffer);
382 
383  av_freep(&sl->bipred_scratchpad);
384  av_freep(&sl->edge_emu_buffer);
385  av_freep(&sl->top_borders[0]);
386  av_freep(&sl->top_borders[1]);
387 
388  sl->bipred_scratchpad_allocated = 0;
389  sl->edge_emu_buffer_allocated = 0;
390  sl->top_borders_allocated[0] = 0;
391  sl->top_borders_allocated[1] = 0;
392  }
393 }
394 
395 int ff_h264_alloc_tables(H264Context *h)
396 {
397  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
398  const int row_mb_num = 2*h->mb_stride*FFMAX(h->avctx->thread_count, 1);
399  int x, y;
400 
402  row_mb_num, 8 * sizeof(uint8_t), fail)
404 
406  big_mb_num * 48 * sizeof(uint8_t), fail)
408  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
410  big_mb_num * sizeof(uint16_t), fail)
412  big_mb_num * sizeof(uint8_t), fail)
414  row_mb_num, 16 * sizeof(uint8_t), fail);
416  row_mb_num, 16 * sizeof(uint8_t), fail);
417  h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
418  h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
419 
421  4 * big_mb_num * sizeof(uint8_t), fail);
423  big_mb_num * sizeof(uint8_t), fail)
424 
425  memset(h->slice_table_base, -1,
426  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
427  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
428 
430  big_mb_num * sizeof(uint32_t), fail);
432  big_mb_num * sizeof(uint32_t), fail);
433  for (y = 0; y < h->mb_height; y++)
434  for (x = 0; x < h->mb_width; x++) {
435  const int mb_xy = x + y * h->mb_stride;
436  const int b_xy = 4 * x + 4 * y * h->b_stride;
437 
438  h->mb2b_xy[mb_xy] = b_xy;
439  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
440  }
441 
442  if (!h->dequant4_coeff[0])
444 
445  return 0;
446 
447 fail:
449  return AVERROR(ENOMEM);
450 }
451 
452 /**
453  * Init context
454  * Allocate buffers which are not shared amongst multiple threads.
455  */
456 int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
457 {
458  ERContext *er = &sl->er;
459  int mb_array_size = h->mb_height * h->mb_stride;
460  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
461  int c_size = h->mb_stride * (h->mb_height + 1);
462  int yc_size = y_size + 2 * c_size;
463  int x, y, i;
464 
465  sl->ref_cache[0][scan8[5] + 1] =
466  sl->ref_cache[0][scan8[7] + 1] =
467  sl->ref_cache[0][scan8[13] + 1] =
468  sl->ref_cache[1][scan8[5] + 1] =
469  sl->ref_cache[1][scan8[7] + 1] =
470  sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
471 
472  if (sl != h->slice_ctx) {
473  memset(er, 0, sizeof(*er));
474  } else
475  if (CONFIG_ERROR_RESILIENCE) {
476 
477  /* init ER */
478  er->avctx = h->avctx;
479  er->decode_mb = h264_er_decode_mb;
480  er->opaque = h;
481  er->quarter_sample = 1;
482 
483  er->mb_num = h->mb_num;
484  er->mb_width = h->mb_width;
485  er->mb_height = h->mb_height;
486  er->mb_stride = h->mb_stride;
487  er->b8_stride = h->mb_width * 2 + 1;
488 
489  // error resilience code looks cleaner with this
491  (h->mb_num + 1) * sizeof(int), fail);
492 
493  for (y = 0; y < h->mb_height; y++)
494  for (x = 0; x < h->mb_width; x++)
495  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
496 
497  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
498  h->mb_stride + h->mb_width;
499 
501  mb_array_size * sizeof(uint8_t), fail);
502 
504  h->mb_height * h->mb_stride, fail);
505 
507  yc_size * sizeof(int16_t), fail);
508  er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
509  er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
510  er->dc_val[2] = er->dc_val[1] + c_size;
511  for (i = 0; i < yc_size; i++)
512  sl->dc_val_base[i] = 1024;
513  }
514 
515  return 0;
516 
517 fail:
518  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
519 }
520 
521 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
522  int parse_extradata);
523 
524 int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
525 {
526  AVCodecContext *avctx = h->avctx;
527  int ret;
528 
529  if (!buf || size <= 0)
530  return -1;
531 
532  if (buf[0] == 1) {
533  int i, cnt, nalsize;
534  const unsigned char *p = buf;
535 
536  h->is_avc = 1;
537 
538  if (size < 7) {
539  av_log(avctx, AV_LOG_ERROR,
540  "avcC %d too short\n", size);
541  return AVERROR_INVALIDDATA;
542  }
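/* avcC layout: byte 4 carries lengthSizeMinusOne in its two low bits and byte 5
 * the SPS count in its five low bits; every parameter set is stored as a 16-bit
 * big-endian length followed by the NAL payload (hence the +2 on nalsize). */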
543  /* sps and pps in the avcC always have length coded with 2 bytes,
544  * so put a fake nal_length_size = 2 while parsing them */
545  h->nal_length_size = 2;
546  // Decode sps from avcC
547  cnt = *(p + 5) & 0x1f; // Number of sps
548  p += 6;
549  for (i = 0; i < cnt; i++) {
550  nalsize = AV_RB16(p) + 2;
551  if(nalsize > size - (p-buf))
552  return AVERROR_INVALIDDATA;
553  ret = decode_nal_units(h, p, nalsize, 1);
554  if (ret < 0) {
555  av_log(avctx, AV_LOG_ERROR,
556  "Decoding sps %d from avcC failed\n", i);
557  return ret;
558  }
559  p += nalsize;
560  }
561  // Decode pps from avcC
562  cnt = *(p++); // Number of pps
563  for (i = 0; i < cnt; i++) {
564  nalsize = AV_RB16(p) + 2;
565  if(nalsize > size - (p-buf))
566  return AVERROR_INVALIDDATA;
567  ret = decode_nal_units(h, p, nalsize, 1);
568  if (ret < 0) {
569  av_log(avctx, AV_LOG_ERROR,
570  "Decoding pps %d from avcC failed\n", i);
571  return ret;
572  }
573  p += nalsize;
574  }
575  // Store right nal length size that will be used to parse all other nals
576  h->nal_length_size = (buf[4] & 0x03) + 1;
577  } else {
578  h->is_avc = 0;
579  ret = decode_nal_units(h, buf, size, 1);
580  if (ret < 0)
581  return ret;
582  }
583  return size;
584 }
585 
586 static int h264_init_context(AVCodecContext *avctx, H264Context *h)
587 {
588  int i;
589 
590  h->avctx = avctx;
591  h->backup_width = -1;
592  h->backup_height = -1;
594  h->dequant_coeff_pps = -1;
595  h->current_sps_id = -1;
596  h->cur_chroma_format_idc = -1;
597 
599  h->slice_context_count = 1;
600  h->workaround_bugs = avctx->workaround_bugs;
601  h->flags = avctx->flags;
602  h->prev_poc_msb = 1 << 16;
603  h->x264_build = -1;
604  h->recovery_frame = -1;
605  h->frame_recovered = 0;
606  h->prev_frame_num = -1;
608 
609  h->next_outputed_poc = INT_MIN;
610  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
611  h->last_pocs[i] = INT_MIN;
612 
614 
616 
618  h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
619  if (!h->slice_ctx) {
620  h->nb_slice_ctx = 0;
621  return AVERROR(ENOMEM);
622  }
623 
624  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
625  h->DPB[i].f = av_frame_alloc();
626  if (!h->DPB[i].f)
627  return AVERROR(ENOMEM);
628  }
629 
630  h->cur_pic.f = av_frame_alloc();
631  if (!h->cur_pic.f)
632  return AVERROR(ENOMEM);
633 
634  h->last_pic_for_ec.f = av_frame_alloc();
635  if (!h->last_pic_for_ec.f)
636  return AVERROR(ENOMEM);
637 
638  for (i = 0; i < h->nb_slice_ctx; i++)
639  h->slice_ctx[i].h264 = h;
640 
641  return 0;
642 }
643 
644 av_cold int ff_h264_decode_init(AVCodecContext *avctx)
645 {
646  H264Context *h = avctx->priv_data;
647  int ret;
648 
649  ret = h264_init_context(avctx, h);
650  if (ret < 0)
651  return ret;
652 
653  /* set defaults */
654  if (!avctx->has_b_frames)
655  h->low_delay = 1;
656 
658 
660 
661  if (avctx->codec_id == AV_CODEC_ID_H264) {
662  if (avctx->ticks_per_frame == 1) {
663  if(h->avctx->time_base.den < INT_MAX/2) {
664  h->avctx->time_base.den *= 2;
665  } else
666  h->avctx->time_base.num /= 2;
667  }
668  avctx->ticks_per_frame = 2;
669  }
670 
671  if (avctx->extradata_size > 0 && avctx->extradata) {
672  ret = ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
673  if (ret < 0) {
675  return ret;
676  }
677  }
678 
682  h->low_delay = 0;
683  }
684 
685  avctx->internal->allocate_progress = 1;
686 
688 
689  if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
690  h->enable_er = 0;
691 
692  if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
693  av_log(avctx, AV_LOG_WARNING,
694  "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
695  "Use it at your own risk\n");
696  }
697 
698  return 0;
699 }
700 
701 static int decode_init_thread_copy(AVCodecContext *avctx)
702 {
703  H264Context *h = avctx->priv_data;
704  int ret;
705 
706  if (!avctx->internal->is_copy)
707  return 0;
708 
709  memset(h, 0, sizeof(*h));
710 
711  ret = h264_init_context(avctx, h);
712  if (ret < 0)
713  return ret;
714 
715  h->context_initialized = 0;
716 
717  return 0;
718 }
719 
720 /**
721  * Run setup operations that must be run after slice header decoding.
722  * This includes finding the next displayed frame.
723  *
724  * @param h h264 master context
725  * @param setup_finished enough NALs have been read that we can call
726  * ff_thread_finish_setup()
727  */
728 static void decode_postinit(H264Context *h, int setup_finished)
729 {
731  H264Picture *cur = h->cur_pic_ptr;
732  int i, pics, out_of_order, out_idx;
733 
734  h->cur_pic_ptr->f->pict_type = h->pict_type;
735 
736  if (h->next_output_pic)
737  return;
738 
739  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
740  /* FIXME: if we have two PAFF fields in one packet, we can't start
741  * the next thread here. If we have one field per packet, we can.
742  * The check in decode_nal_units() is not good enough to find this
743  * yet, so we assume the worst for now. */
744  // if (setup_finished)
745  // ff_thread_finish_setup(h->avctx);
746  if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
747  return;
748  if (h->avctx->hwaccel || h->missing_fields <=1)
749  return;
750  }
751 
752  cur->f->interlaced_frame = 0;
753  cur->f->repeat_pict = 0;
754 
755  /* Signal interlacing information externally. */
756  /* Prioritize picture timing SEI information over used
757  * decoding process if it exists. */
758 
759  if (h->sps.pic_struct_present_flag) {
760  switch (h->sei_pic_struct) {
762  break;
765  cur->f->interlaced_frame = 1;
766  break;
769  if (FIELD_OR_MBAFF_PICTURE(h))
770  cur->f->interlaced_frame = 1;
771  else
772  // try to flag soft telecine progressive
774  break;
777  /* Signal the possibility of telecined film externally
778  * (pic_struct 5,6). From these hints, let the applications
779  * decide if they apply deinterlacing. */
780  cur->f->repeat_pict = 1;
781  break;
783  cur->f->repeat_pict = 2;
784  break;
786  cur->f->repeat_pict = 4;
787  break;
788  }
789 
790  if ((h->sei_ct_type & 3) &&
792  cur->f->interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
793  } else {
794  /* Derive interlacing flag from used decoding process. */
796  }
798 
799  if (cur->field_poc[0] != cur->field_poc[1]) {
800  /* Derive top_field_first from field pocs. */
801  cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
802  } else {
803  if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) {
804  /* Use picture timing SEI information. Even if it is
805  * information from a past frame, it is better than nothing. */
808  cur->f->top_field_first = 1;
809  else
810  cur->f->top_field_first = 0;
811  } else {
812  /* Most likely progressive */
813  cur->f->top_field_first = 0;
814  }
815  }
816 
817  if (h->sei_frame_packing_present &&
822  AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
823  if (stereo) {
824  switch (h->frame_packing_arrangement_type) {
825  case 0:
826  stereo->type = AV_STEREO3D_CHECKERBOARD;
827  break;
828  case 1:
829  stereo->type = AV_STEREO3D_COLUMNS;
830  break;
831  case 2:
832  stereo->type = AV_STEREO3D_LINES;
833  break;
834  case 3:
835  if (h->quincunx_subsampling)
837  else
838  stereo->type = AV_STEREO3D_SIDEBYSIDE;
839  break;
840  case 4:
841  stereo->type = AV_STEREO3D_TOPBOTTOM;
842  break;
843  case 5:
845  break;
846  case 6:
847  stereo->type = AV_STEREO3D_2D;
848  break;
849  }
850 
851  if (h->content_interpretation_type == 2)
852  stereo->flags = AV_STEREO3D_FLAG_INVERT;
853  }
854  }
855 
858  double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
859  AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
861  sizeof(int32_t) * 9);
862  if (rotation) {
863  av_display_rotation_set((int32_t *)rotation->data, angle);
864  av_display_matrix_flip((int32_t *)rotation->data,
865  h->sei_hflip, h->sei_vflip);
866  }
867  }
868 
871  sizeof(uint8_t));
872 
873  if (sd) {
876  }
877  }
878 
879  if (h->a53_caption) {
882  h->a53_caption_size);
883  if (sd)
884  memcpy(sd->data, h->a53_caption, h->a53_caption_size);
885  av_freep(&h->a53_caption);
886  h->a53_caption_size = 0;
888  }
889 
890  cur->mmco_reset = h->mmco_reset;
891  h->mmco_reset = 0;
892 
893  // FIXME do something with unavailable reference frames
894 
895  /* Sort B-frames into display order */
896 
900  h->low_delay = 0;
901  }
902 
906  h->low_delay = 0;
907  }
908 
909  for (i = 0; 1; i++) {
910  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
911  if(i)
912  h->last_pocs[i-1] = cur->poc;
913  break;
914  } else if(i) {
915  h->last_pocs[i-1]= h->last_pocs[i];
916  }
917  }
918  out_of_order = MAX_DELAYED_PIC_COUNT - i;
919  if( cur->f->pict_type == AV_PICTURE_TYPE_B
921  out_of_order = FFMAX(out_of_order, 1);
922  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
923  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
924  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
925  h->last_pocs[i] = INT_MIN;
926  h->last_pocs[0] = cur->poc;
927  cur->mmco_reset = 1;
928  } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
929  av_log(h->avctx, AV_LOG_VERBOSE, "Increasing reorder buffer to %d\n", out_of_order);
930  h->avctx->has_b_frames = out_of_order;
931  h->low_delay = 0;
932  }
933 
934  pics = 0;
935  while (h->delayed_pic[pics])
936  pics++;
937 
939 
940  h->delayed_pic[pics++] = cur;
941  if (cur->reference == 0)
942  cur->reference = DELAYED_PIC_REF;
943 
944  out = h->delayed_pic[0];
945  out_idx = 0;
946  for (i = 1; h->delayed_pic[i] &&
947  !h->delayed_pic[i]->f->key_frame &&
948  !h->delayed_pic[i]->mmco_reset;
949  i++)
950  if (h->delayed_pic[i]->poc < out->poc) {
951  out = h->delayed_pic[i];
952  out_idx = i;
953  }
954  if (h->avctx->has_b_frames == 0 &&
955  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
956  h->next_outputed_poc = INT_MIN;
957  out_of_order = out->poc < h->next_outputed_poc;
958 
959  if (out_of_order || pics > h->avctx->has_b_frames) {
960  out->reference &= ~DELAYED_PIC_REF;
961  // for frame threading, the owner must be the second field's thread or
962  // else the first thread can release the picture and reuse it unsafely
963  for (i = out_idx; h->delayed_pic[i]; i++)
964  h->delayed_pic[i] = h->delayed_pic[i + 1];
965  }
966  if (!out_of_order && pics > h->avctx->has_b_frames) {
967  h->next_output_pic = out;
968  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
969  h->next_outputed_poc = INT_MIN;
970  } else
971  h->next_outputed_poc = out->poc;
972  } else {
973  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
974  }
975 
976  if (h->next_output_pic) {
977  if (h->next_output_pic->recovered) {
978  // We have reached a recovery point and all frames after it in
979  // display order are "recovered".
981  }
983  }
984 
985  if (setup_finished && !h->avctx->hwaccel) {
987 
989  h->setup_finished = 1;
990  }
991 }
992 
994 {
995  int list, i;
996  int luma_def, chroma_def;
997 
998  sl->use_weight = 0;
999  sl->use_weight_chroma = 0;
1001  if (h->sps.chroma_format_idc)
1003 
1004  if (sl->luma_log2_weight_denom > 7U) {
1005  av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", sl->luma_log2_weight_denom);
1006  sl->luma_log2_weight_denom = 0;
1007  }
1008  if (sl->chroma_log2_weight_denom > 7U) {
1009  av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", sl->chroma_log2_weight_denom);
1010  sl->chroma_log2_weight_denom = 0;
1011  }
1012 
1013  luma_def = 1 << sl->luma_log2_weight_denom;
1014  chroma_def = 1 << sl->chroma_log2_weight_denom;
1015 
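/* For each reference in each list the bitstream may carry explicit (weight, offset)
 * pairs for luma and chroma; entries without the flag fall back to the defaults
 * above (weight = 1 << log2_denom, offset = 0). use_weight/use_weight_chroma are
 * only set when at least one entry differs from those defaults. */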
1016  for (list = 0; list < 2; list++) {
1017  sl->luma_weight_flag[list] = 0;
1018  sl->chroma_weight_flag[list] = 0;
1019  for (i = 0; i < sl->ref_count[list]; i++) {
1020  int luma_weight_flag, chroma_weight_flag;
1021 
1022  luma_weight_flag = get_bits1(&sl->gb);
1023  if (luma_weight_flag) {
1024  sl->luma_weight[i][list][0] = get_se_golomb(&sl->gb);
1025  sl->luma_weight[i][list][1] = get_se_golomb(&sl->gb);
1026  if (sl->luma_weight[i][list][0] != luma_def ||
1027  sl->luma_weight[i][list][1] != 0) {
1028  sl->use_weight = 1;
1029  sl->luma_weight_flag[list] = 1;
1030  }
1031  } else {
1032  sl->luma_weight[i][list][0] = luma_def;
1033  sl->luma_weight[i][list][1] = 0;
1034  }
1035 
1036  if (h->sps.chroma_format_idc) {
1037  chroma_weight_flag = get_bits1(&sl->gb);
1038  if (chroma_weight_flag) {
1039  int j;
1040  for (j = 0; j < 2; j++) {
1041  sl->chroma_weight[i][list][j][0] = get_se_golomb(&sl->gb);
1042  sl->chroma_weight[i][list][j][1] = get_se_golomb(&sl->gb);
1043  if (sl->chroma_weight[i][list][j][0] != chroma_def ||
1044  sl->chroma_weight[i][list][j][1] != 0) {
1045  sl->use_weight_chroma = 1;
1046  sl->chroma_weight_flag[list] = 1;
1047  }
1048  }
1049  } else {
1050  int j;
1051  for (j = 0; j < 2; j++) {
1052  sl->chroma_weight[i][list][j][0] = chroma_def;
1053  sl->chroma_weight[i][list][j][1] = 0;
1054  }
1055  }
1056  }
1057  }
1058  if (sl->slice_type_nos != AV_PICTURE_TYPE_B)
1059  break;
1060  }
1061  sl->use_weight = sl->use_weight || sl->use_weight_chroma;
1062  return 0;
1063 }
1064 
1065 /**
1066  * instantaneous decoder refresh.
1067  */
1068 static void idr(H264Context *h)
1069 {
1070  int i;
1071  ff_h264_remove_all_refs(h);
1072  h->prev_frame_num =
1073  h->prev_frame_num_offset = 0;
1074  h->prev_poc_msb = 1<<16;
1075  h->prev_poc_lsb = 0;
1076  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1077  h->last_pocs[i] = INT_MIN;
1078 }
1079 
1080 /* forget old pics after a seek */
1081 void ff_h264_flush_change(H264Context *h)
1082 {
1083  int i, j;
1084 
1085  h->next_outputed_poc = INT_MIN;
1086  h->prev_interlaced_frame = 1;
1087  idr(h);
1088 
1089  h->prev_frame_num = -1;
1090  if (h->cur_pic_ptr) {
1091  h->cur_pic_ptr->reference = 0;
1092  for (j=i=0; h->delayed_pic[i]; i++)
1093  if (h->delayed_pic[i] != h->cur_pic_ptr)
1094  h->delayed_pic[j++] = h->delayed_pic[i];
1095  h->delayed_pic[j] = NULL;
1096  }
1097  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1098 
1099  h->first_field = 0;
1100  ff_h264_reset_sei(h);
1101  h->recovery_frame = -1;
1102  h->frame_recovered = 0;
1103  h->current_slice = 0;
1104  h->mmco_reset = 1;
1105  for (i = 0; i < h->nb_slice_ctx; i++)
1106  h->slice_ctx[i].list_count = 0;
1107 }
1108 
1109 /* forget old pics after a seek */
1110 static void flush_dpb(AVCodecContext *avctx)
1111 {
1112  H264Context *h = avctx->priv_data;
1113  int i;
1114 
1115  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
1116 
1117  ff_h264_flush_change(h);
1118 
1119  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
1120  ff_h264_unref_picture(h, &h->DPB[i]);
1121  h->cur_pic_ptr = NULL;
1122  ff_h264_unref_picture(h, &h->cur_pic);
1123 
1124  h->mb_y = 0;
1125 
1126  ff_h264_free_tables(h);
1127  h->context_initialized = 0;
1128 }
1129 
1130 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
1131 {
1132  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
1133  int field_poc[2];
1134 
1136  if (h->frame_num < h->prev_frame_num)
1137  h->frame_num_offset += max_frame_num;
1138 
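/* Picture order count derivation follows the three modes of the spec:
 * type 0 reconstructs poc_msb from poc_lsb with wrap-around detection against
 * the previous reference picture, type 1 predicts the POC from frame_num and
 * the SPS offset cycle, and type 2 ties the POC to 2 * frame_num (minus one
 * for non-reference pictures). */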
1139  if (h->sps.poc_type == 0) {
1140  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
1141 
1142  if (h->poc_lsb < h->prev_poc_lsb &&
1143  h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
1144  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
1145  else if (h->poc_lsb > h->prev_poc_lsb &&
1146  h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
1147  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
1148  else
1149  h->poc_msb = h->prev_poc_msb;
1150  field_poc[0] =
1151  field_poc[1] = h->poc_msb + h->poc_lsb;
1152  if (h->picture_structure == PICT_FRAME)
1153  field_poc[1] += h->delta_poc_bottom;
1154  } else if (h->sps.poc_type == 1) {
1155  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
1156  int i;
1157 
1158  if (h->sps.poc_cycle_length != 0)
1159  abs_frame_num = h->frame_num_offset + h->frame_num;
1160  else
1161  abs_frame_num = 0;
1162 
1163  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
1164  abs_frame_num--;
1165 
1166  expected_delta_per_poc_cycle = 0;
1167  for (i = 0; i < h->sps.poc_cycle_length; i++)
1168  // FIXME integrate during sps parse
1169  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
1170 
1171  if (abs_frame_num > 0) {
1172  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
1173  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
1174 
1175  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
1176  for (i = 0; i <= frame_num_in_poc_cycle; i++)
1177  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
1178  } else
1179  expectedpoc = 0;
1180 
1181  if (h->nal_ref_idc == 0)
1182  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
1183 
1184  field_poc[0] = expectedpoc + h->delta_poc[0];
1185  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
1186 
1187  if (h->picture_structure == PICT_FRAME)
1188  field_poc[1] += h->delta_poc[1];
1189  } else {
1190  int poc = 2 * (h->frame_num_offset + h->frame_num);
1191 
1192  if (!h->nal_ref_idc)
1193  poc--;
1194 
1195  field_poc[0] = poc;
1196  field_poc[1] = poc;
1197  }
1198 
1200  pic_field_poc[0] = field_poc[0];
1202  pic_field_poc[1] = field_poc[1];
1203  *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
1204 
1205  return 0;
1206 }
1207 
1208 /**
1209  * Compute profile from profile_idc and constraint_set?_flags.
1210  *
1211  * @param sps SPS
1212  *
1213  * @return profile as defined by FF_PROFILE_H264_*
1214  */
1215 int ff_h264_get_profile(SPS *sps)
1216 {
1217  int profile = sps->profile_idc;
1218 
1219  switch (sps->profile_idc) {
1220  case FF_PROFILE_H264_BASELINE:
1221  // constraint_set1_flag set to 1
1222  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
1223  break;
1224  case FF_PROFILE_H264_HIGH_10:
1225  case FF_PROFILE_H264_HIGH_422:
1226  case FF_PROFILE_H264_HIGH_444:
1227  // constraint_set3_flag set to 1
1228  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
1229  break;
1230  }
1231 
1232  return profile;
1233 }
1234 
1235 int ff_set_ref_count(H264Context *h, H264SliceContext *sl)
1236 {
1237  int ref_count[2], list_count;
1238  int num_ref_idx_active_override_flag;
1239 
1240  // set defaults, might be overridden a few lines later
1241  ref_count[0] = h->pps.ref_count[0];
1242  ref_count[1] = h->pps.ref_count[1];
1243 
1244  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1245  unsigned max[2];
1246  max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31;
1247 
1248  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1249  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1250  num_ref_idx_active_override_flag = get_bits1(&sl->gb);
1251 
1252  if (num_ref_idx_active_override_flag) {
1253  ref_count[0] = get_ue_golomb(&sl->gb) + 1;
1254  if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
1255  ref_count[1] = get_ue_golomb(&sl->gb) + 1;
1256  } else
1257  // full range is spec-ok in this case, even for frames
1258  ref_count[1] = 1;
1259  }
1260 
1261  if (ref_count[0]-1 > max[0] || ref_count[1]-1 > max[1]){
1262  av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", ref_count[0]-1, max[0], ref_count[1]-1, max[1]);
1263  sl->ref_count[0] = sl->ref_count[1] = 0;
1264  sl->list_count = 0;
1265  return AVERROR_INVALIDDATA;
1266  }
1267 
1268  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1269  list_count = 2;
1270  else
1271  list_count = 1;
1272  } else {
1273  list_count = 0;
1274  ref_count[0] = ref_count[1] = 0;
1275  }
1276 
1277  if (list_count != sl->list_count ||
1278  ref_count[0] != sl->ref_count[0] ||
1279  ref_count[1] != sl->ref_count[1]) {
1280  sl->ref_count[0] = ref_count[0];
1281  sl->ref_count[1] = ref_count[1];
1282  sl->list_count = list_count;
1283  return 1;
1284  }
1285 
1286  return 0;
1287 }
1288 
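/* Annex B start-code prefix; the (deprecated) VDPAU path in decode_nal_units()
 * prepends it when re-framing length-prefixed avcC slice data as a byte stream. */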
1289 static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
1290 
1291 static int get_bit_length(H264Context *h, const uint8_t *buf,
1292  const uint8_t *ptr, int dst_length,
1293  int i, int next_avc)
1294 {
1295  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
1296  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
1297  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
1299 
1300  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
1301  while (dst_length > 0 && ptr[dst_length - 1] == 0)
1302  dst_length--;
1303 
1304  if (!dst_length)
1305  return 0;
1306 
1307  return 8 * dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1);
1308 }
1309 
1310 static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
1311 {
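/* Scan the access unit without decoding it to find the index of the last
 * NAL (parameter set or first slice of the picture) that has to be parsed
 * before frame-threaded setup may finish; decode_nal_units() compares
 * nal_index against this value before calling decode_postinit(). */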
1312  int next_avc = h->is_avc ? 0 : buf_size;
1313  int nal_index = 0;
1314  int buf_index = 0;
1315  int nals_needed = 0;
1316  int first_slice = 0;
1317 
1318  while(1) {
1319  GetBitContext gb;
1320  int nalsize = 0;
1321  int dst_length, bit_length, consumed;
1322  const uint8_t *ptr;
1323 
1324  if (buf_index >= next_avc) {
1325  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1326  if (nalsize < 0)
1327  break;
1328  next_avc = buf_index + nalsize;
1329  } else {
1330  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1331  if (buf_index >= buf_size)
1332  break;
1333  if (buf_index >= next_avc)
1334  continue;
1335  }
1336 
1337  ptr = ff_h264_decode_nal(h, &h->slice_ctx[0], buf + buf_index, &dst_length, &consumed,
1338  next_avc - buf_index);
1339 
1340  if (!ptr || dst_length < 0)
1341  return AVERROR_INVALIDDATA;
1342 
1343  buf_index += consumed;
1344 
1345  bit_length = get_bit_length(h, buf, ptr, dst_length,
1346  buf_index, next_avc);
1347  nal_index++;
1348 
1349  /* packets can sometimes contain multiple PPS/SPS,
1350  * e.g. two PAFF field pictures in one packet, or a demuxer
1351  * which splits NALs strangely; if so, when frame threading we
1352  * can't start the next thread until we've read all of them */
1353  switch (h->nal_unit_type) {
1354  case NAL_SPS:
1355  case NAL_PPS:
1356  nals_needed = nal_index;
1357  break;
1358  case NAL_DPA:
1359  case NAL_IDR_SLICE:
1360  case NAL_SLICE:
1361  init_get_bits(&gb, ptr, bit_length);
1362  if (!get_ue_golomb(&gb) ||
1363  !first_slice ||
1364  first_slice != h->nal_unit_type)
1365  nals_needed = nal_index;
1366  if (!first_slice)
1367  first_slice = h->nal_unit_type;
1368  }
1369  }
1370 
1371  return nals_needed;
1372 }
1373 
1374 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1375  int parse_extradata)
1376 {
1377  AVCodecContext *const avctx = h->avctx;
1378  H264SliceContext *sl;
1379  int buf_index;
1380  unsigned context_count;
1381  int next_avc;
1382  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
1383  int nal_index;
1384  int idr_cleared=0;
1385  int ret = 0;
1386 
1387  h->nal_unit_type= 0;
1388 
1389  if(!h->slice_context_count)
1390  h->slice_context_count= 1;
1392  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1393  h->current_slice = 0;
1394  if (!h->first_field)
1395  h->cur_pic_ptr = NULL;
1396  ff_h264_reset_sei(h);
1397  }
1398 
1399  if (h->nal_length_size == 4) {
1400  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
1401  h->is_avc = 0;
1402  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
1403  h->is_avc = 1;
1404  }
1405 
1406  if (avctx->active_thread_type & FF_THREAD_FRAME)
1407  nals_needed = get_last_needed_nal(h, buf, buf_size);
1408 
1409  {
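/* Main NAL loop: split the buffer at Annex B start codes or avcC length
 * prefixes, unescape each NAL into rbsp_buffer, and dispatch on
 * nal_unit_type; slice NALs are queued per slice context and executed in
 * batches of h->max_contexts via ff_h264_execute_decode_slices(). */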
1410  buf_index = 0;
1411  context_count = 0;
1412  next_avc = h->is_avc ? 0 : buf_size;
1413  nal_index = 0;
1414  for (;;) {
1415  int consumed;
1416  int dst_length;
1417  int bit_length;
1418  const uint8_t *ptr;
1419  int nalsize = 0;
1420  int err;
1421 
1422  if (buf_index >= next_avc) {
1423  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1424  if (nalsize < 0)
1425  break;
1426  next_avc = buf_index + nalsize;
1427  } else {
1428  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1429  if (buf_index >= buf_size)
1430  break;
1431  if (buf_index >= next_avc)
1432  continue;
1433  }
1434 
1435  sl = &h->slice_ctx[context_count];
1436 
1437  ptr = ff_h264_decode_nal(h, sl, buf + buf_index, &dst_length,
1438  &consumed, next_avc - buf_index);
1439  if (!ptr || dst_length < 0) {
1440  ret = -1;
1441  goto end;
1442  }
1443 
1444  bit_length = get_bit_length(h, buf, ptr, dst_length,
1445  buf_index + consumed, next_avc);
1446 
1447  if (h->avctx->debug & FF_DEBUG_STARTCODE)
1449  "NAL %d/%d at %d/%d length %d\n",
1450  h->nal_unit_type, h->nal_ref_idc, buf_index, buf_size, dst_length);
1451 
1452  if (h->is_avc && (nalsize != consumed) && nalsize)
1454  "AVC: Consumed only %d bytes instead of %d\n",
1455  consumed, nalsize);
1456 
1457  buf_index += consumed;
1458  nal_index++;
1459 
1460  if (avctx->skip_frame >= AVDISCARD_NONREF &&
1461  h->nal_ref_idc == 0 &&
1462  h->nal_unit_type != NAL_SEI)
1463  continue;
1464 
1465 again:
1466  /* Ignore per frame NAL unit type during extradata
1467  * parsing. Decoding slices is not possible in codec init
1468  * with frame-mt */
1469  if (parse_extradata) {
1470  switch (h->nal_unit_type) {
1471  case NAL_IDR_SLICE:
1472  case NAL_SLICE:
1473  case NAL_DPA:
1474  case NAL_DPB:
1475  case NAL_DPC:
1477  "Ignoring NAL %d in global header/extradata\n",
1478  h->nal_unit_type);
1479  // fall through to next case
1480  case NAL_AUXILIARY_SLICE:
1481  h->nal_unit_type = NAL_FF_IGNORE;
1482  }
1483  }
1484 
1485  err = 0;
1486 
1487  switch (h->nal_unit_type) {
1488  case NAL_IDR_SLICE:
1489  if ((ptr[0] & 0xFC) == 0x98) {
1490  av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
1491  h->next_outputed_poc = INT_MIN;
1492  ret = -1;
1493  goto end;
1494  }
1495  if (h->nal_unit_type != NAL_IDR_SLICE) {
1497  "Invalid mix of idr and non-idr slices\n");
1498  ret = -1;
1499  goto end;
1500  }
1501  if(!idr_cleared) {
1502  if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
1503  av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
1504  ret = AVERROR_INVALIDDATA;
1505  goto end;
1506  }
1507  idr(h); // FIXME ensure we don't lose some frames if there is reordering
1508  }
1509  idr_cleared = 1;
1510  h->has_recovery_point = 1;
1511  case NAL_SLICE:
1512  init_get_bits(&sl->gb, ptr, bit_length);
1513 
1514  if ( nals_needed >= nal_index
1515  || (!(avctx->active_thread_type & FF_THREAD_FRAME) && !context_count))
1516  h->au_pps_id = -1;
1517 
1518  if ((err = ff_h264_decode_slice_header(h, sl)))
1519  break;
1520 
1521  if (h->sei_recovery_frame_cnt >= 0) {
1523  h->valid_recovery_point = 1;
1524 
1525  if ( h->recovery_frame < 0
1526  || av_mod_uintp2(h->recovery_frame - h->frame_num, h->sps.log2_max_frame_num) > h->sei_recovery_frame_cnt) {
1527  h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->sps.log2_max_frame_num);
1528 
1529  if (!h->valid_recovery_point)
1530  h->recovery_frame = h->frame_num;
1531  }
1532  }
1533 
1534  h->cur_pic_ptr->f->key_frame |=
1535  (h->nal_unit_type == NAL_IDR_SLICE);
1536 
1537  if (h->nal_unit_type == NAL_IDR_SLICE ||
1538  h->recovery_frame == h->frame_num) {
1539  h->recovery_frame = -1;
1540  h->cur_pic_ptr->recovered = 1;
1541  }
1542  // If we have an IDR, all frames after it in decoded order are
1543  // "recovered".
1544  if (h->nal_unit_type == NAL_IDR_SLICE)
1546  h->frame_recovered |= 3*!!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL);
1547  h->frame_recovered |= 3*!!(avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT);
1548 #if 1
1550 #else
1552 #endif
1553 
1554  if (h->current_slice == 1) {
1555  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
1556  decode_postinit(h, nal_index >= nals_needed);
1557 
1558  if (h->avctx->hwaccel &&
1559  (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
1560  goto end;
1561 #if FF_API_CAP_VDPAU
1565 #endif
1566  }
1567 
1568  if (sl->redundant_pic_count == 0) {
1569  if (avctx->hwaccel) {
1570  ret = avctx->hwaccel->decode_slice(avctx,
1571  &buf[buf_index - consumed],
1572  consumed);
1573  if (ret < 0)
1574  goto end;
1575 #if FF_API_CAP_VDPAU
1576  } else if (CONFIG_H264_VDPAU_DECODER &&
1579  start_code,
1580  sizeof(start_code));
1582  &buf[buf_index - consumed],
1583  consumed);
1584 #endif
1585  } else
1586  context_count++;
1587  } else
1588  sl->ref_count[0] = sl->ref_count[1] = 0;
1589  break;
1590  break;
1591  case NAL_DPA:
1592  case NAL_DPB:
1593  case NAL_DPC:
1594  avpriv_request_sample(avctx, "data partitioning");
1595  break;
1596  case NAL_SEI:
1597  init_get_bits(&h->gb, ptr, bit_length);
1598  ret = ff_h264_decode_sei(h);
1599  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1600  goto end;
1601  break;
1602  case NAL_SPS:
1603  init_get_bits(&h->gb, ptr, bit_length);
1604  if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
1605  break;
1606  if (h->is_avc ? nalsize : 1) {
1608  "SPS decoding failure, trying again with the complete NAL\n");
1609  if (h->is_avc)
1610  av_assert0(next_avc - buf_index + consumed == nalsize);
1611  if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8)
1612  break;
1613  init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
1614  8*(next_avc - buf_index + consumed - 1));
1615  if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
1616  break;
1617  }
1618  init_get_bits(&h->gb, ptr, bit_length);
1619  ff_h264_decode_seq_parameter_set(h, 1);
1620 
1621  break;
1622  case NAL_PPS:
1623  init_get_bits(&h->gb, ptr, bit_length);
1624  ret = ff_h264_decode_picture_parameter_set(h, bit_length);
1625  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1626  goto end;
1627  break;
1628  case NAL_AUD:
1629  case NAL_END_SEQUENCE:
1630  case NAL_END_STREAM:
1631  case NAL_FILLER_DATA:
1632  case NAL_SPS_EXT:
1633  case NAL_AUXILIARY_SLICE:
1634  break;
1635  case NAL_FF_IGNORE:
1636  break;
1637  default:
1638  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
1639  h->nal_unit_type, bit_length);
1640  }
1641 
1642  if (context_count == h->max_contexts) {
1643  ret = ff_h264_execute_decode_slices(h, context_count);
1644  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1645  goto end;
1646  context_count = 0;
1647  }
1648 
1649  if (err < 0 || err == SLICE_SKIPED) {
1650  if (err < 0)
1651  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
1652  sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
1653  } else if (err == SLICE_SINGLETHREAD) {
1654  if (context_count > 0) {
1655  ret = ff_h264_execute_decode_slices(h, context_count);
1656  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1657  goto end;
1658  context_count = 0;
1659  }
1660  /* Slice could not be decoded in parallel mode, restart. Note
1661  * that rbsp_buffer is not transferred, but since we no longer
1662  * run in parallel mode this should not be an issue. */
1663  sl = &h->slice_ctx[0];
1664  goto again;
1665  }
1666  }
1667  }
1668  if (context_count) {
1669  ret = ff_h264_execute_decode_slices(h, context_count);
1670  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1671  goto end;
1672  }
1673 
1674  ret = 0;
1675 end:
1676 
1677 #if CONFIG_ERROR_RESILIENCE
1678  sl = h->slice_ctx;
1679  /*
1680  * FIXME: Error handling code does not seem to support interlaced
1681  * when slices span multiple rows
1682  * The ff_er_add_slice calls don't work right for bottom
1683  * fields; they cause massive erroneous error concealing
1684  * Error marking covers both fields (top and bottom).
1685  * This causes a mismatched s->error_count
1686  * and a bad error table. Further, the error count goes to
1687  * INT_MAX when called for bottom field, because mb_y is
1688  * past end by one (callers fault) and resync_mb_y != 0
1689  * causes problems for the first MB line, too.
1690  */
1691  if (!FIELD_PICTURE(h) && h->current_slice && !h->sps.new && h->enable_er) {
1692  int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
1693 
1695 
1696  if (use_last_pic) {
1698  sl->ref_list[0][0].parent = &h->last_pic_for_ec;
1699  memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
1700  memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
1702  } else if (sl->ref_count[0]) {
1703  ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
1704  } else
1706 
1707  if (sl->ref_count[1])
1708  ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
1709 
1710  sl->er.ref_count = sl->ref_count[0];
1711 
1712  ff_er_frame_end(&sl->er);
1713  if (use_last_pic)
1714  memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
1715  }
1716 #endif /* CONFIG_ERROR_RESILIENCE */
1717  /* clean up */
1718  if (h->cur_pic_ptr && !h->droppable) {
1721  }
1722 
1723  return (ret < 0) ? ret : buf_index;
1724 }
1725 
1726 /**
1727  * Return the number of bytes consumed for building the current frame.
1728  */
1729 static int get_consumed_bytes(int pos, int buf_size)
1730 {
1731  if (pos == 0)
1732  pos = 1; // avoid infinite loops (I doubt that is needed but...)
1733  if (pos + 10 > buf_size)
1734  pos = buf_size; // oops ;)
1735 
1736  return pos;
1737 }
1738 
1739 static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
1740 {
1741  AVFrame *src = srcp->f;
1742  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
1743  int i;
1744  int ret = av_frame_ref(dst, src);
1745  if (ret < 0)
1746  return ret;
1747 
1748  av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(h), 0);
1749 
1750  h->backup_width = h->avctx->width;
1751  h->backup_height = h->avctx->height;
1752  h->backup_pix_fmt = h->avctx->pix_fmt;
1753 
1754  h->avctx->width = dst->width;
1755  h->avctx->height = dst->height;
1756  h->avctx->pix_fmt = dst->format;
1757 
1758  if (srcp->sei_recovery_frame_cnt == 0)
1759  dst->key_frame = 1;
1760  if (!srcp->crop)
1761  return 0;
1762 
1763  for (i = 0; i < desc->nb_components; i++) {
1764  int hshift = (i > 0) ? desc->log2_chroma_w : 0;
1765  int vshift = (i > 0) ? desc->log2_chroma_h : 0;
1766  int off = ((srcp->crop_left >> hshift) << h->pixel_shift) +
1767  (srcp->crop_top >> vshift) * dst->linesize[i];
1768  dst->data[i] += off;
1769  }
1770  return 0;
1771 }
1772 
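/* Checks whether a buffer really looks like avcC extradata: a run of SPS NAL
 * units (type 7) followed by a run of PPS NAL units (type 8), each prefixed
 * with a 16-bit big-endian length. */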
1773 static int is_extra(const uint8_t *buf, int buf_size)
1774 {
1775  int cnt= buf[5]&0x1f;
1776  const uint8_t *p= buf+6;
1777  while(cnt--){
1778  int nalsize= AV_RB16(p) + 2;
1779  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
1780  return 0;
1781  p += nalsize;
1782  }
1783  cnt = *(p++);
1784  if(!cnt)
1785  return 0;
1786  while(cnt--){
1787  int nalsize= AV_RB16(p) + 2;
1788  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
1789  return 0;
1790  p += nalsize;
1791  }
1792  return 1;
1793 }
1794 
1795 static int h264_decode_frame(AVCodecContext *avctx, void *data,
1796  int *got_frame, AVPacket *avpkt)
1797 {
1798  const uint8_t *buf = avpkt->data;
1799  int buf_size = avpkt->size;
1800  H264Context *h = avctx->priv_data;
1801  AVFrame *pict = data;
1802  int buf_index = 0;
1803  H264Picture *out;
1804  int i, out_idx;
1805  int ret;
1806 
1807  h->flags = avctx->flags;
1808  h->setup_finished = 0;
1809 
1810  if (h->backup_width != -1) {
1811  avctx->width = h->backup_width;
1812  h->backup_width = -1;
1813  }
1814  if (h->backup_height != -1) {
1815  avctx->height = h->backup_height;
1816  h->backup_height = -1;
1817  }
1818  if (h->backup_pix_fmt != AV_PIX_FMT_NONE) {
1819  avctx->pix_fmt = h->backup_pix_fmt;
1820  h->backup_pix_fmt = AV_PIX_FMT_NONE;
1821  }
1822 
1823  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1824 
1825  /* end of stream, output what is still in the buffers */
1826  if (buf_size == 0) {
1827  out:
1828 
1829  h->cur_pic_ptr = NULL;
1830  h->first_field = 0;
1831 
1832  // FIXME factorize this with the output code below
1833  out = h->delayed_pic[0];
1834  out_idx = 0;
1835  for (i = 1;
1836  h->delayed_pic[i] &&
1837  !h->delayed_pic[i]->f->key_frame &&
1838  !h->delayed_pic[i]->mmco_reset;
1839  i++)
1840  if (h->delayed_pic[i]->poc < out->poc) {
1841  out = h->delayed_pic[i];
1842  out_idx = i;
1843  }
1844 
1845  for (i = out_idx; h->delayed_pic[i]; i++)
1846  h->delayed_pic[i] = h->delayed_pic[i + 1];
1847 
1848  if (out) {
1849  out->reference &= ~DELAYED_PIC_REF;
1850  ret = output_frame(h, pict, out);
1851  if (ret < 0)
1852  return ret;
1853  *got_frame = 1;
1854  }
1855 
1856  return buf_index;
1857  }
1859  int side_size;
1860  uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1861  if (is_extra(side, side_size))
1862  ff_h264_decode_extradata(h, side, side_size);
1863  }
1864  if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
1865  if (is_extra(buf, buf_size))
1866  return ff_h264_decode_extradata(h, buf, buf_size);
1867  }
1868 
1869  buf_index = decode_nal_units(h, buf, buf_size, 0);
1870  if (buf_index < 0)
1871  return AVERROR_INVALIDDATA;
1872 
1873  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1874  av_assert0(buf_index <= buf_size);
1875  goto out;
1876  }
1877 
1878  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1879  if (avctx->skip_frame >= AVDISCARD_NONREF ||
1880  buf_size >= 4 && !memcmp("Q264", buf, 4))
1881  return buf_size;
1882  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1883  return AVERROR_INVALIDDATA;
1884  }
1885 
1886  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1887  (h->mb_y >= h->mb_height && h->mb_height)) {
1888  if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
1889  decode_postinit(h, 1);
1890 
1891  if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1892  return ret;
1893 
1894  /* Wait for second field. */
1895  *got_frame = 0;
1896  if (h->next_output_pic && (
1897  h->next_output_pic->recovered)) {
1898  if (!h->next_output_pic->recovered)
1900 
1901  if (!h->avctx->hwaccel &&
1902  (h->next_output_pic->field_poc[0] == INT_MAX ||
1903  h->next_output_pic->field_poc[1] == INT_MAX)
1904  ) {
1905  int p;
1906  AVFrame *f = h->next_output_pic->f;
1907  int field = h->next_output_pic->field_poc[0] == INT_MAX;
1908  uint8_t *dst_data[4];
1909  int linesizes[4];
1910  const uint8_t *src_data[4];
1911 
1912  av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
1913 
1914  for (p = 0; p<4; p++) {
1915  dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
1916  src_data[p] = f->data[p] + field *f->linesize[p];
1917  linesizes[p] = 2*f->linesize[p];
1918  }
1919 
1920  av_image_copy(dst_data, linesizes, src_data, linesizes,
1921  f->format, f->width, f->height>>1);
1922  }
1923 
1924  ret = output_frame(h, pict, h->next_output_pic);
1925  if (ret < 0)
1926  return ret;
1927  *got_frame = 1;
1928  if (CONFIG_MPEGVIDEO) {
1929  ff_print_debug_info2(h->avctx, pict, NULL,
1933  &h->low_delay,
1934  h->mb_width, h->mb_height, h->mb_stride, 1);
1935  }
1936  }
1937  }
1938 
1939  av_assert0(pict->buf[0] || !*got_frame);
1940 
1942 
1943  return get_consumed_bytes(buf_index, buf_size);
1944 }
1945 
1946 av_cold void ff_h264_free_context(H264Context *h)
1947 {
1948  int i;
1949 
1950  ff_h264_free_tables(h);
1951 
1952  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
1953  ff_h264_unref_picture(h, &h->DPB[i]);
1954  av_frame_free(&h->DPB[i].f);
1955  }
1956  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
1957 
1958  h->cur_pic_ptr = NULL;
1959 
1960  for (i = 0; i < h->nb_slice_ctx; i++)
1961  av_freep(&h->slice_ctx[i].rbsp_buffer);
1962  av_freep(&h->slice_ctx);
1963  h->nb_slice_ctx = 0;
1964 
1965  for (i = 0; i < MAX_SPS_COUNT; i++)
1966  av_freep(h->sps_buffers + i);
1967 
1968  for (i = 0; i < MAX_PPS_COUNT; i++)
1969  av_freep(h->pps_buffers + i);
1970 }
1971 
1972 static av_cold int h264_decode_end(AVCodecContext *avctx)
1973 {
1974  H264Context *h = avctx->priv_data;
1975 
1976  ff_h264_remove_all_refs(h);
1977  ff_h264_free_context(h);
1978 
1979  ff_h264_unref_picture(h, &h->cur_pic);
1980  av_frame_free(&h->cur_pic.f);
1981  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1982  av_frame_free(&h->last_pic_for_ec.f);
1983 
1984  return 0;
1985 }
1986 
1987 #define OFFSET(x) offsetof(H264Context, x)
1988 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1989 static const AVOption h264_options[] = {
1990  {"is_avc", "is avc", offsetof(H264Context, is_avc), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
1991  {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
1992  { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD },
1993  { NULL },
1994 };
1995 
1996 static const AVClass h264_class = {
1997  .class_name = "H264 Decoder",
1998  .item_name = av_default_item_name,
1999  .option = h264_options,
2000  .version = LIBAVUTIL_VERSION_INT,
2001 };
2002 
2003 static const AVProfile profiles[] = {
2004  { FF_PROFILE_H264_BASELINE, "Baseline" },
2005  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
2006  { FF_PROFILE_H264_MAIN, "Main" },
2007  { FF_PROFILE_H264_EXTENDED, "Extended" },
2008  { FF_PROFILE_H264_HIGH, "High" },
2009  { FF_PROFILE_H264_HIGH_10, "High 10" },
2010  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
2011  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
2012  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
2013  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
2014  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
2015  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
2016  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
2017  { FF_PROFILE_UNKNOWN },
2018 };
2019 
2021  .name = "h264",
2022  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
2023  .type = AVMEDIA_TYPE_VIDEO,
2024  .id = AV_CODEC_ID_H264,
2025  .priv_data_size = sizeof(H264Context),
2026  .init = ff_h264_decode_init,
2027  .close = h264_decode_end,
2028  .decode = h264_decode_frame,
2029  .capabilities = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
2032  .flush = flush_dpb,
2034  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
2035  .profiles = NULL_IF_CONFIG_SMALL(profiles),
2036  .priv_class = &h264_class,
2037 };
2038 
2039 #if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU
2040 static const AVClass h264_vdpau_class = {
2041  .class_name = "H264 VDPAU Decoder",
2042  .item_name = av_default_item_name,
2043  .option = h264_options,
2044  .version = LIBAVUTIL_VERSION_INT,
2045 };
2046 
2047 AVCodec ff_h264_vdpau_decoder = {
2048  .name = "h264_vdpau",
2049  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
2050  .type = AVMEDIA_TYPE_VIDEO,
2051  .id = AV_CODEC_ID_H264,
2052  .priv_data_size = sizeof(H264Context),
2054  .close = h264_decode_end,
2057  .flush = flush_dpb,
2058  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
2059  AV_PIX_FMT_NONE},
2060  .profiles = NULL_IF_CONFIG_SMALL(profiles),
2061  .priv_class = &h264_vdpau_class,
2062 };
2063 #endif
Definition: diracdec.c:74
AVBufferPool * mb_type_pool
Definition: h264.h:832
int crop
Definition: h264.h:346
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264.c:98
int16_t(*[2] motion_val)[2]
Definition: h264.h:317
int flags
Definition: h264.h:553
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1732
int mb_height
Definition: h264.h:620
H264Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:671
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:633
AVBufferPool * ref_index_pool
Definition: h264.h:834
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:126
void ff_h264_free_tables(H264Context *h)
Definition: h264.c:352
ERPicture last_pic
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:1215
int sei_reguserdata_afd_present
User data registered by Rec.
Definition: h264.h:748
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:327
H264Context.
Definition: h264.h:517
AVFrame * f
Definition: h264.h:310
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries...
Definition: avcodec.h:822
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:652
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:3159
4: bottom field, top field, in that order
Definition: h264.h:151
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264.h:798
AVCodec.
Definition: avcodec.h:3482
int picture_structure
Definition: h264.h:590
Definition: h264.h:117
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:387
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
int profile_idc
Definition: h264.h:176
unsigned current_sps_id
id of the current SPS
Definition: h264.h:575
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:460
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264.c:56
int ff_set_ref_count(H264Context *h, H264SliceContext *sl)
Definition: h264.c:1235
Definition: h264.h:118
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1641
uint8_t * chroma_pred_mode_table
Definition: h264.h:599
int setup_finished
Definition: h264.h:815
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3236
#define AV_RN32A(p)
Definition: intreadwrite.h:526
BYTE int const BYTE * srcp
Definition: avisynth_c.h:676
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2932
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:882
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:701
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Definition: h264.h:119
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:583
uint8_t * a53_caption
Definition: h264.h:751
uint8_t
#define av_cold
Definition: attributes.h:74
int prev_frame_num_offset
for POC type 2
Definition: h264.h:655
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
int offset_for_non_ref_pic
Definition: h264.h:184
mode
Definition: f_perms.c:27
AVOptions.
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:37
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:123
#define FF_PROFILE_H264_EXTENDED
Definition: avcodec.h:3160
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
Definition: h264.c:586
int poc
frame POC
Definition: h264.h:329
#define AV_RB32
Definition: intreadwrite.h:130
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
AVCodec ff_h264_decoder
Definition: h264.c:2020
Multithreading support functions.
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
static int find_start_code(const uint8_t *buf, int buf_size, int buf_index, int next_avc)
Definition: h264.h:1180
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:366
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:3126
#define emms_c()
Definition: internal.h:53
#define FF_PROFILE_H264_CONSTRAINED
Definition: avcodec.h:3154
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1627
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:472
#define FF_PROFILE_H264_HIGH_444_INTRA
Definition: avcodec.h:3168
ERPicture cur_pic
int frame_recovered
Initial frame has been completely recovered.
Definition: h264.h:805
Structure to hold side data for an AVFrame.
Definition: frame.h:134
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
uint8_t * data
Definition: avcodec.h:1433
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:337
#define AV_CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Definition: avcodec.h:893
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
AVDictionary * metadata
metadata.
Definition: frame.h:543
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:367
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:54
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:785
ptrdiff_t size
Definition: opengl_enc.c:101
#define FF_PROFILE_H264_HIGH_422_INTRA
Definition: avcodec.h:3165
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
H264Picture * parent
Definition: h264.h:359
high precision timer, useful to profile code
int recovered
picture at IDR or recovery point + recovery count
Definition: h264.h:342
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2254
#define av_log(a,...)
int sei_vflip
Definition: h264.h:743
unsigned int rbsp_buffer_size
Definition: h264.h:511
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:672
H.264 / AVC / MPEG4 part10 codec.
#define U(x)
Definition: vp56_arith.h:37
int frame_num
Definition: h264.h:651
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:818
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264.h:526
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1822
int flags
Additional information about the frame packing.
Definition: stereo3d.h:132
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
Definition: golomb.h:53
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:1729
int16_t * dc_val_base
Definition: h264.h:468
int poc_type
pic_order_cnt_type
Definition: h264.h:181
int context_initialized
Definition: h264.h:552
int profile
Definition: mxfenc.c:1820
static const uint16_t mask[17]
Definition: lzw.c:38
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2911
ERContext er
Definition: h264.h:365
int nal_unit_type
Definition: h264.h:628
av_default_item_name
Definition: h264.h:115
int num_reorder_frames
Definition: h264.h:215
#define AV_RB16
Definition: intreadwrite.h:53
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:100
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
int backup_height
Definition: h264.h:545
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:178
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3062
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:400
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
Definition: h264.c:1130
const char * r
Definition: vf_curves.c:107
int backup_width
Backup frame properties: needed, because they can be different between returned frame and last decode...
Definition: h264.h:544
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:1110
int capabilities
Codec capabilities.
Definition: avcodec.h:3501
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
static const AVOption h264_options[]
Definition: h264.c:1989
int ff_pred_weight_table(H264Context *h, H264SliceContext *sl)
Definition: h264.c:993
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
PPS pps
current pps
Definition: h264.h:577
#define FF_BUG_TRUNCATED
Definition: avcodec.h:2816
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:600
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:728
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1607
#define FF_BUG_AUTODETECT
autodetection
Definition: avcodec.h:2797
ThreadFrame tf
Definition: h264.h:311
0: frame
Definition: h264.h:147
simple assert() macros that are a bit more flexible than ISO C assert().
#define PICT_TOP_FIELD
Definition: mpegutils.h:33
GLsizei GLsizei * length
Definition: opengl_enc.c:115
const char * name
Name of the codec implementation.
Definition: avcodec.h:3489
int direct_spatial_mv_pred
Definition: h264.h:444
void ff_init_cabac_states(void)
Definition: cabac.c:72
unsigned int top_samples_available
Definition: h264.h:417
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264.h:1024
int valid_recovery_point
Are the SEI recovery points looking valid.
Definition: h264.h:782
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:35
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:593
#define FFMAX(a, b)
Definition: common.h:90
Libavcodec external API header.
#define fail()
Definition: checkasm.h:57
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:920
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:307
int new
flag to keep track if the decoder context needs re-init due to changed SPS
Definition: h264.h:231
int * mb_index2xy
int offset_for_top_to_bottom_field
Definition: h264.h:185
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:91
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:369
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
Decode a slice header.
Definition: h264_slice.c:1152
static const uint8_t scan8[16 *3+3]
Definition: h264.h:1008
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:217
int crop_left
Definition: h264.h:347
uint8_t * error_status_table
int use_weight
Definition: h264.h:383
uint8_t * direct_table
Definition: h264.h:601
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2866
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:634
useful rectangle filling function
uint8_t * data[3]
Definition: h264.h:352
void ff_vdpau_h264_picture_start(H264Context *h)
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:71
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:62
int sei_anticlockwise_rotation
Definition: h264.h:742
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1765
#define CONFIG_MPEGVIDEO
Definition: config.h:583
Definition: h264.h:114
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
int frame_num_offset
for POC type 2
Definition: h264.h:654
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:482
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2900
FPA sei_fpa
Definition: h264.h:784
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int x264_build
Definition: h264.h:617
uint32_t * mb2br_xy
Definition: h264.h:571
uint8_t * er_temp_buffer
#define OFFSET(x)
Definition: h264.c:1987
#define FFMIN(a, b)
Definition: common.h:92
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:586
#define H264_MAX_THREADS
Definition: h264.h:47
float y
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:186
int reference
Definition: h264.h:341
#define FIELD_PICTURE(h)
Definition: h264.h:74
int sei_frame_packing_present
frame_packing_arrangment SEI message
Definition: h264.h:733
int width
picture width / height.
Definition: avcodec.h:1691
int redundant_pic_count
Definition: h264.h:437
int nb_slice_ctx
Definition: h264.h:532
#define FF_PROFILE_H264_HIGH_10_INTRA
Definition: avcodec.h:3163
uint32_t * mb_type
Definition: h264.h:320
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:474
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
SPS sps
current sps
Definition: h264.h:576
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:640
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:114
int sei_hflip
Definition: h264.h:743
#define MAX_SPS_COUNT
Definition: h264.h:49
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:589
Context Adaptive Binary Arithmetic Coder inline functions.
int mmco_reset
Definition: h264.h:681
H264SliceContext * slice_ctx
Definition: h264.h:531
int poc_lsb
Definition: h264.h:647
int reference
Definition: h264.h:355
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:1795
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1650
int top_borders_allocated[2]
Definition: h264.h:475
uint8_t active_format_description
Definition: h264.h:749
int chroma_log2_weight_denom
Definition: h264.h:386
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
Definition: h264.c:1739
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define PART_NOT_AVAILABLE
Definition: h264.h:562
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3043
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
Definition: avcodec.h:2046
uint8_t * edge_emu_buffer
Definition: h264.h:471
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:642
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:924
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:639
static const int8_t mv[256][2]
Definition: 4xm.c:77
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
short offset_for_ref_frame[256]
Definition: h264.h:213
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:127
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:456
int mb_stride
Definition: h264.h:621
AVCodecContext * avctx
Definition: h264.h:519
AVS_Value src
Definition: avisynth_c.h:482
H264 / AVC / MPEG4 part10 codec data table
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3055
static int get_bit_length(H264Context *h, const uint8_t *buf, const uint8_t *ptr, int dst_length, int i, int next_avc)
Definition: h264.c:1291
1: top field
Definition: h264.h:148
enum AVCodecID codec_id
Definition: avcodec.h:1529
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:512
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:656
ERPicture next_pic
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:3447
int next_outputed_poc
Definition: h264.h:674
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:419
int poc_msb
Definition: h264.h:648
int field_poc[2]
top/bottom POC
Definition: h264.h:328
int debug
debug
Definition: avcodec.h:2852
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
Definition: avcodec.h:3167
int max_contexts
Max number of threads / contexts.
Definition: h264.h:701
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:792
main external API structure.
Definition: avcodec.h:1512
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:728
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:395
2: bottom field
Definition: h264.h:149
uint8_t * data
Definition: frame.h:136
int ff_h264_check_intra4x4_pred_mode(const H264Context *h, H264SliceContext *sl)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:137
void * buf
Definition: avisynth_c.h:553
int frame_packing_arrangement_type
Definition: h264.h:734
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:1569
int8_t * qscale_table
Definition: h264.h:314
int extradata_size
Definition: avcodec.h:1628
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:109
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:230
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:305
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:720
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:433
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3054
int slice_flags
slice flags
Definition: avcodec.h:2044
static int get_avc_nalsize(H264Context *h, const uint8_t *buf, int buf_size, int *buf_index)
Definition: h264.h:1190
Describe the class of an AVClass context structure.
Definition: log.h:67
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:590
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:481
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:1972
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:250
Definition: h264.h:120
int8_t * ref_index[2]
Definition: h264.h:326
int use_weight_chroma
Definition: h264.h:384
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:415
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:534
int mmco_reset
MMCO_RESET set this 1.
Definition: h264.h:331
H264Picture * cur_pic_ptr
Definition: h264.h:527
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:435
int enable_er
Definition: h264.h:829
int frame_packing_arrangement_cancel_flag
is previous arrangement canceled, -1 if never received
Definition: h264.h:264
#define FF_PROFILE_H264_CAVLC_444
Definition: avcodec.h:3169
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:115
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:182
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:153
#define FF_PROFILE_H264_INTRA
Definition: avcodec.h:3155
static int is_extra(const uint8_t *buf, int buf_size)
Definition: h264.c:1773
AVCodecContext * avctx
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:245
static const uint8_t start_code[]
Definition: h264.c:1289
Views are on top of each other.
Definition: stereo3d.h:55
int linesize[3]
Definition: h264.h:353
int pic_struct_present_flag
Definition: h264.h:221
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:32
unsigned int list_count
Definition: h264.h:461
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:1946
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
int has_recovery_point
Definition: h264.h:807
Views are next to each other.
Definition: stereo3d.h:45
#define MAX_MBPAIR_SIZE
Definition: h264.h:56
#define CONFIG_ERROR_RESILIENCE
Definition: config.h:518
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:521
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:1068
int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
Decode SPS.
Definition: h264_ps.c:303
discard all non reference
Definition: avcodec.h:685
AVBufferPool * qscale_table_pool
Definition: h264.h:831
H264Picture * next_output_pic
Definition: h264.h:673
int slice_context_count
Definition: h264.h:703
AVBufferPool * motion_val_pool
Definition: h264.h:833
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:3161
#define SLICE_SINGLETHREAD
Definition: h264.h:1223
common internal api header.
if(ret< 0)
Definition: vf_mcdeint.c:280
int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
Definition: h264.c:524
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264.h:803
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:132
uint16_t * slice_table_base
Definition: h264.h:644
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:180
int missing_fields
Definition: h264.h:809
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector predicion.
Bi-dir predicted.
Definition: avutil.h:268
const char * ff_h264_sei_stereo_mode(H264Context *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
Definition: h264_sei.c:489
AVProfile.
Definition: avcodec.h:3470
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2796
int cur_chroma_format_idc
Definition: h264.h:821
int8_t * intra4x4_pred_mode
Definition: h264.h:401
unsigned properties
Definition: avcodec.h:3445
int den
denominator
Definition: rational.h:45
uint8_t * rbsp_buffer
Definition: h264.h:510
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:758
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:636
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
Definition: avcodec.h:3158
void * priv_data
Definition: avcodec.h:1554
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:154
#define PICT_FRAME
Definition: mpegutils.h:35
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:653
#define CONFIG_H264_VDPAU_DECODER
Definition: config.h:697
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:131
int8_t ref_cache[2][5 *8]
Definition: h264.h:487
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:743
Definition: h264.h:113
#define SLICE_SKIPED
Definition: h264.h:1224
#define VD
Definition: h264.c:1988
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:372
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:54
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1562
#define FF_PROFILE_H264_BASELINE
Definition: avcodec.h:3157
int luma_log2_weight_denom
Definition: h264.h:385
int chroma_weight[48][2][2][2]
Definition: h264.h:391
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:76
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:2832
H264Picture cur_pic
Definition: h264.h:528
int sei_display_orientation_present
display orientation SEI message
Definition: h264.h:741
int content_interpretation_type
Definition: h264.h:735
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:237
Views are packed per column.
Definition: stereo3d.h:107
int mb_width
Definition: h264.h:620
enum AVPictureType pict_type
Definition: h264.h:711
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264.h:693
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1614
#define FF_PROFILE_H264_HIGH_422
Definition: avcodec.h:3164
uint32_t * mb2b_xy
Definition: h264.h:570
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:462
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> out
int delta_poc_bottom
Definition: h264.h:649
H264Picture last_pic_for_ec
Definition: h264.h:529
int au_pps_id
pps_id of current access unit
Definition: h264.h:579
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:228
int height
Definition: frame.h:220
int crop_top
Definition: h264.h:348
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:831
unsigned int left_samples_available
Definition: h264.h:419
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:500
#define av_freep(p)
static int init_thread_copy(AVCodecContext *avctx)
Definition: alac.c:646
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:329
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
int8_t * intra4x4_pred_mode
Definition: h264.h:556
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3664
8: frame tripling
Definition: h264.h:155
#define AV_RN64A(p)
Definition: intreadwrite.h:530
int mb_field_decoding_flag
Definition: h264.h:434
uint8_t(* non_zero_count)[48]
Definition: h264.h:559
exp golomb vlc stuff
uint8_t * bipred_scratchpad
Definition: h264.h:470
AVPixelFormat
Pixel format.
Definition: pixfmt.h:65
This structure stores compressed data.
Definition: avcodec.h:1410
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:777
int droppable
Definition: h264.h:548
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:857
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2830
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:627
GetBitContext gb
Definition: h264.h:364
for(j=16;j >0;--j)
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:141
int b_stride
Definition: h264.h:572
Context Adaptive Binary Arithmetic Coder.
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:388
static const AVProfile profiles[]
Definition: h264.c:2003
void ff_h264_init_dequant_tables(H264Context *h)
Definition: h264_slice.c:367