FFmpeg  2.8.17
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "rectangle.h"
32 #include "thread.h"
33 #include "vp8.h"
34 #include "vp8data.h"
35 
36 #if ARCH_ARM
37 # include "arm/vp8.h"
38 #endif
39 
40 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
41 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
42 #elif CONFIG_VP7_DECODER
43 #define VPX(vp7, f) vp7_ ## f
44 #else // CONFIG_VP8_DECODER
45 #define VPX(vp7, f) vp8_ ## f
46 #endif
47 
/* Free all per-context allocations (thread data, MB state, edge buffers).
 * NOTE(review): several cleanup lines (mutex destroy, per-thread
 * filter_strength free, macroblocks_base/intra4x4 frees) appear to have been
 * lost in this extraction — verify against upstream FFmpeg 2.8 vp8.c. */
static void free_buffers(VP8Context *s)
{
    int i;
    if (s->thread_data)
        for (i = 0; i < MAX_THREADS; i++) {
#if HAVE_THREADS
            pthread_cond_destroy(&s->thread_data[i].cond);
#endif
        }
    av_freep(&s->thread_data);
    av_freep(&s->top_nnz);
    av_freep(&s->top_border);

    /* s->macroblocks aliases storage owned by macroblocks_base; clear it. */
    s->macroblocks = NULL;
}
67 
/* Allocate the frame buffer for f (optionally as a long-lived reference) and
 * a zeroed per-macroblock segment map. Returns 0 or a negative AVERROR.
 * NOTE(review): a line between the failed seg_map allocation and the ENOMEM
 * return was lost in extraction (presumably releasing f->tf) — verify. */
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
{
    int ret;
    if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
                                    ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
        return ret;
    if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
        return AVERROR(ENOMEM);
    }
    return 0;
}
80 
{
    /* NOTE(review): function body lost in extraction (doxygen links
     * stripped); presumably releases the threaded frame buffer and unrefs
     * the segment map — verify against upstream vp8_release_frame(). */
}
86 
87 #if CONFIG_VP8_DECODER
88 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
89 {
90  int ret;
91 
92  vp8_release_frame(s, dst);
93 
94  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
95  return ret;
96  if (src->seg_map &&
97  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
98  vp8_release_frame(s, dst);
99  return AVERROR(ENOMEM);
100  }
101 
102  return 0;
103 }
104 #endif /* CONFIG_VP8_DECODER */
105 
106 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
107 {
108  VP8Context *s = avctx->priv_data;
109  int i;
110 
111  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
112  vp8_release_frame(s, &s->frames[i]);
113  memset(s->framep, 0, sizeof(s->framep));
114 
115  if (free_mem)
116  free_buffers(s);
117 }
118 
/* AVCodec.flush callback: drop reference frames but keep allocated buffers. */
static void vp8_decode_flush(AVCodecContext *avctx)
{
    vp8_decode_flush_impl(avctx, 0);
}
123 
{
    /* NOTE(review): signature line lost in extraction — this is
     * vp8_find_free_buffer(VP8Context *s) judging by the frame-pool logic. */
    VP8Frame *frame = NULL;
    int i;

    // find a free buffer
    for (i = 0; i < 5; i++)
        if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
            frame = &s->frames[i];
            break;
        }
    /* At most four of the five pool entries can be referenced at once, so
     * falling through the loop indicates internal state corruption. */
    if (i == 5) {
        av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
        abort();
    }
    /* Recycle an entry that still holds stale picture data. */
    if (frame->tf.f->data[0])
        vp8_release_frame(s, frame);

    return frame;
}
147 
/* (Re)configure decoder dimensions and (re)allocate per-frame working
 * buffers. Returns 0 or a negative AVERROR.
 * NOTE(review): several allocation/flush statements were lost in this
 * extraction (marked below) — verify against upstream vp8.c. */
static av_always_inline
int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
{
    AVCodecContext *avctx = s->avctx;
    int i, ret;

    if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
        height != s->avctx->height) {
        /* NOTE(review): statement lost in extraction here (presumably a full
         * flush freeing old buffers before the size change). */

        ret = ff_set_dimensions(s->avctx, width, height);
        if (ret < 0)
            return ret;
    }

    s->mb_width = (s->avctx->coded_width + 15) / 16;
    s->mb_height = (s->avctx->coded_height + 15) / 16;

    /* VP7 and slice-threaded VP8 use the 2-D macroblock layout. */
    s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
                   avctx->thread_count > 1;
    if (!s->mb_layout) { // Frame threading and one thread
        s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
                                         sizeof(*s->macroblocks));
        /* NOTE(review): intra4x4_pred_mode_top allocation lost in extraction
         * (it is checked below) — verify. */
    } else // Sliced threading
        s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
                                         sizeof(*s->macroblocks));
    s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
    s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
    /* NOTE(review): thread_data allocation lost in extraction (it is checked
     * below) — verify. */

    if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
        !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
        free_buffers(s);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < MAX_THREADS; i++) {
        /* NOTE(review): the assignment target (thread_data[i].filter_strength)
         * was lost in extraction — only the RHS below survived. */
            av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
        if (!s->thread_data[i].filter_strength) {
            free_buffers(s);
            return AVERROR(ENOMEM);
        }
#if HAVE_THREADS
        pthread_mutex_init(&s->thread_data[i].lock, NULL);
        pthread_cond_init(&s->thread_data[i].cond, NULL);
#endif
    }

    /* Offset by one so mb[-1] (left neighbour) is always addressable. */
    s->macroblocks = s->macroblocks_base + 1;

    return 0;
}
202 
{
    /* VP7 wrapper: forwards to update_dimensions() with is_vp7 set.
     * NOTE(review): signature line lost in extraction. */
    return update_dimensions(s, width, height, IS_VP7);
}
207 
{
    /* VP8 wrapper: forwards to update_dimensions() with is_vp7 unset.
     * NOTE(review): signature line lost in extraction. */
    return update_dimensions(s, width, height, IS_VP8);
}
212 
213 
{
    /* Parse the segmentation portion of the frame header (VP8 spec 9.3).
     * NOTE(review): the signature and several range-coder read statements
     * (update_map/absolute_vals flags, per-segment base_quant and
     * filter_level reads — the bodies of the two for-loops) were lost in
     * extraction; the code below is incomplete as written — verify. */
    VP56RangeCoder *c = &s->c;
    int i;


    if (vp8_rac_get(c)) { // update segment feature data

        for (i = 0; i < 4; i++)

        for (i = 0; i < 4; i++)
    }
    if (s->segmentation.update_map)
        for (i = 0; i < 3; i++)
            s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
}
234 
{
    /* Parse loop-filter delta updates: per-reference-frame and per-mode
     * adjustments, each coded as a 6-bit magnitude plus a sign bit
     * (VP8 spec 9.4). NOTE(review): signature line lost in extraction. */
    VP56RangeCoder *c = &s->c;
    int i;

    for (i = 0; i < 4; i++) {
        if (vp8_rac_get(c)) {
            s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);

            if (vp8_rac_get(c))
                s->lf_delta.ref[i] = -s->lf_delta.ref[i];
        }
    }

    for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
        if (vp8_rac_get(c)) {
            s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);

            if (vp8_rac_get(c))
                s->lf_delta.mode[i] = -s->lf_delta.mode[i];
        }
    }
}
258 
259 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
260 {
261  const uint8_t *sizes = buf;
262  int i;
263  int ret;
264 
265  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
266 
267  buf += 3 * (s->num_coeff_partitions - 1);
268  buf_size -= 3 * (s->num_coeff_partitions - 1);
269  if (buf_size < 0)
270  return -1;
271 
272  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
273  int size = AV_RL24(sizes + 3 * i);
274  if (buf_size - size < 0)
275  return -1;
276 
277  ret = ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
278  if (ret < 0)
279  return ret;
280  buf += size;
281  buf_size -= size;
282  }
283  return ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
284 }
285 
286 static void vp7_get_quants(VP8Context *s)
287 {
288  VP56RangeCoder *c = &s->c;
289 
290  int yac_qi = vp8_rac_get_uint(c, 7);
291  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
292  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
293  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
294  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
295  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
296 
297  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
298  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
299  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
300  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
301  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
302  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
303 }
304 
305 static void vp8_get_quants(VP8Context *s)
306 {
307  VP56RangeCoder *c = &s->c;
308  int i, base_qi;
309 
310  int yac_qi = vp8_rac_get_uint(c, 7);
311  int ydc_delta = vp8_rac_get_sint(c, 4);
312  int y2dc_delta = vp8_rac_get_sint(c, 4);
313  int y2ac_delta = vp8_rac_get_sint(c, 4);
314  int uvdc_delta = vp8_rac_get_sint(c, 4);
315  int uvac_delta = vp8_rac_get_sint(c, 4);
316 
317  for (i = 0; i < 4; i++) {
318  if (s->segmentation.enabled) {
319  base_qi = s->segmentation.base_quant[i];
320  if (!s->segmentation.absolute_vals)
321  base_qi += yac_qi;
322  } else
323  base_qi = yac_qi;
324 
325  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
326  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
327  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
328  /* 101581>>16 is equivalent to 155/100 */
329  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
330  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
331  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
332 
333  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
334  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
335  }
336 }
337 
338 /**
339  * Determine which buffers golden and altref should be updated with after this frame.
340  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
341  *
342  * Intra frames update all 3 references
343  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
344  * If the update (golden|altref) flag is set, it's updated with the current frame
345  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
346  * If the flag is not set, the number read means:
347  * 0: no update
348  * 1: VP56_FRAME_PREVIOUS
349  * 2: update golden with altref, or update altref with golden
350  */
{
    /* NOTE(review): signature line lost in extraction; per the comment block
     * above, this decides which frame the golden/altref slot is updated from. */
    VP56RangeCoder *c = &s->c;

    if (update)
        return VP56_FRAME_CURRENT;

    switch (vp8_rac_get_uint(c, 2)) {
    case 1:
        return VP56_FRAME_PREVIOUS;
    case 2:
        /* NOTE(review): the case-2 return (cross-update golden<->altref) was
         * lost in extraction — as written this falls to VP56_FRAME_NONE. */
    }
    return VP56_FRAME_NONE;
}
366 
{
    /* Reset all DCT token probabilities to the spec defaults, expanding the
     * per-coefficient-band defaults out to all 16 coefficient positions.
     * NOTE(review): signature line lost in extraction. */
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 16; j++)
            memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
                   sizeof(s->prob->token[i][j]));
}
375 
{
    /* Conditionally update DCT token probabilities (VP8 spec 13.4): for each
     * updated token a new 8-bit probability is broadcast to every coefficient
     * position of its band.
     * NOTE(review): the signature line and the per-token update-flag branch
     * (an if with its opening brace) were lost in extraction, which is why
     * the braces below do not balance — verify against upstream. */
    VP56RangeCoder *c = &s->c;
    int i, j, k, l, m;

    for (i = 0; i < 4; i++)
        for (j = 0; j < 8; j++)
            for (k = 0; k < 3; k++)
                for (l = 0; l < NUM_DCT_TOKENS-1; l++)
                    int prob = vp8_rac_get_uint(c, 8);
                    for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
                        s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
                }
}
391 
392 #define VP7_MVC_SIZE 17
393 #define VP8_MVC_SIZE 19
394 
/* NOTE(review): the first signature line was lost in extraction — this takes
 * (VP8Context *s, ..., int mvc_size) and updates intra pred16x16, pred8x8c
 * and MV component probabilities. */
                                                 int mvc_size)
{
    VP56RangeCoder *c = &s->c;
    int i, j;

    if (vp8_rac_get(c))
        for (i = 0; i < 4; i++)
            s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
    if (vp8_rac_get(c))
        for (i = 0; i < 3; i++)
            s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);

    // 17.2 MV probability update
    /* NOTE(review): the per-component update-flag check guarding this
     * assignment was lost in extraction — verify. */
    for (i = 0; i < 2; i++)
        for (j = 0; j < mvc_size; j++)
            s->prob->mvc[i][j] = vp8_rac_get_nn(c);
}
414 
415 static void update_refs(VP8Context *s)
416 {
417  VP56RangeCoder *c = &s->c;
418 
419  int update_golden = vp8_rac_get(c);
420  int update_altref = vp8_rac_get(c);
421 
422  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
423  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
424 }
425 
426 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
427 {
428  int i, j;
429 
430  for (j = 1; j < 3; j++) {
431  for (i = 0; i < height / 2; i++)
432  memcpy(dst->data[j] + i * dst->linesize[j],
433  src->data[j] + i * src->linesize[j], width / 2);
434  }
435 }
436 
/* Apply the VP7 fade: for every pixel, dst = clip(src + src*beta/256 + alpha)
 * with the result clamped to [0, 255]. */
static void fade(uint8_t *dst, int dst_linesize,
                 const uint8_t *src, int src_linesize,
                 int width, int height,
                 int alpha, int beta)
{
    int x, y;

    for (y = 0; y < height; y++) {
        const uint8_t *src_row = src + y * src_linesize;
        uint8_t       *dst_row = dst + y * dst_linesize;
        for (x = 0; x < width; x++) {
            int luma = src_row[x];
            dst_row[x] = av_clip_uint8(luma + ((luma * beta) >> 8) + alpha);
        }
    }
}
450 
{
    /* Apply VP7 inter-frame fading to the previous frame (spec section E).
     * NOTE(review): the signature line and the golden-frame preservation
     * branch were lost in extraction (marked below) — verify upstream. */
    int alpha = (int8_t) vp8_rac_get_uint(c, 8);
    int beta  = (int8_t) vp8_rac_get_uint(c, 8);
    int ret;

    if (!s->keyframe && (alpha || beta)) {
        int width  = s->mb_width * 16;
        int height = s->mb_height * 16;
        AVFrame *src, *dst;

        if (!s->framep[VP56_FRAME_PREVIOUS] ||
            !s->framep[VP56_FRAME_GOLDEN]) {
            av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
            return AVERROR_INVALIDDATA;
        }

        /* Fade in place by default: dst and src are the same frame. */
        dst =
        src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;

        /* preserve the golden frame, write a new previous frame */
        /* NOTE(review): the guard (golden == previous) and the new-buffer
         * selection were lost in extraction here — the dangling brace below
         * closes that lost branch. */
            if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
                return ret;

            dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;

            copy_chroma(dst, src, width, height);
        }

        fade(dst->data[0], dst->linesize[0],
             src->data[0], src->linesize[0],
             width, height, alpha, beta);
    }

    return 0;
}
489 
/* Parse the VP7 uncompressed + first-partition frame header.
 * Returns 0 or a negative AVERROR.
 * NOTE(review): several statements were lost in this extraction (doxygen
 * links stripped); each gap is marked inline — verify against upstream. */
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
    VP56RangeCoder *c = &s->c;
    int part1_size, hscale, vscale, i, j, ret;
    int width = s->avctx->width;
    int height = s->avctx->height;

    if (buf_size < 4) {
        return AVERROR_INVALIDDATA;
    }

    s->profile = (buf[0] >> 1) & 7;
    if (s->profile > 1) {
        avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
        return AVERROR_INVALIDDATA;
    }

    s->keyframe = !(buf[0] & 1);
    s->invisible = 0;
    part1_size = AV_RL24(buf) >> 4;

    if (buf_size < 4 - s->profile + part1_size) {
        av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
        return AVERROR_INVALIDDATA;
    }

    buf += 4 - s->profile;
    buf_size -= 4 - s->profile;

    memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));

    ret = ff_vp56_init_range_decoder(c, buf, part1_size);
    if (ret < 0)
        return ret;
    buf += part1_size;
    buf_size -= part1_size;

    /* A. Dimension information (keyframes only) */
    if (s->keyframe) {
        width = vp8_rac_get_uint(c, 12);
        height = vp8_rac_get_uint(c, 12);
        hscale = vp8_rac_get_uint(c, 2);
        vscale = vp8_rac_get_uint(c, 2);
        if (hscale || vscale)
            avpriv_request_sample(s->avctx, "Upscaling");

        /* NOTE(review): the reference-update resets, probability-table reset
         * and the pred16x16/pred8x8c memcpy heads were lost in extraction;
         * only the dangling sizeof continuations below survived. */
               sizeof(s->prob->pred16x16));
               sizeof(s->prob->pred8x8c));
        for (i = 0; i < 2; i++)
            memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
                   sizeof(vp7_mv_default_prob[i]));
        memset(&s->segmentation, 0, sizeof(s->segmentation));
        memset(&s->lf_delta, 0, sizeof(s->lf_delta));
        memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));
    }

    if (s->keyframe || s->profile > 0)
        memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));

    /* B. Decoding information for all four macroblock-level features */
    for (i = 0; i < 4; i++) {
        s->feature_enabled[i] = vp8_rac_get(c);
        if (s->feature_enabled[i]) {
            /* NOTE(review): the feature-present-probability read was lost in
             * extraction here. */

            for (j = 0; j < 3; j++)
                s->feature_index_prob[i][j] =
                    vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;

            if (vp7_feature_value_size[s->profile][i])
                for (j = 0; j < 4; j++)
                    s->feature_value[i][j] =
            /* NOTE(review): the RHS of the feature-value assignment was lost
             * in extraction here. */
        }
    }

    /* VP7 has no segmentation or loop-filter deltas in the bitstream. */
    s->segmentation.enabled = 0;
    s->segmentation.update_map = 0;
    s->lf_delta.enabled = 0;

    s->num_coeff_partitions = 1;
    ret = ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
    if (ret < 0)
        return ret;

    if (!s->macroblocks_base || /* first frame */
        width != s->avctx->width || height != s->avctx->height ||
        (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
        if ((ret = vp7_update_dimensions(s, width, height)) < 0)
            return ret;
    }

    /* C. Dequantization indices */
    vp7_get_quants(s);

    /* D. Golden frame update flag (a Flag) for interframes only */
    if (!s->keyframe) {
        /* NOTE(review): the golden-update read was lost in extraction. */
    }

    s->update_last = 1;
    s->update_probabilities = 1;
    s->fade_present = 1;

    if (s->profile > 0) {
        /* NOTE(review): the update_probabilities read was lost in
         * extraction here. */
        if (!s->update_probabilities)
            s->prob[1] = s->prob[0];

        if (!s->keyframe)
            s->fade_present = vp8_rac_get(c);
    }

    if (vpX_rac_is_end(c))
        return AVERROR_INVALIDDATA;
    /* E. Fading information for previous frame */
    if (s->fade_present && vp8_rac_get(c)) {
        if ((ret = vp7_fade_frame(s ,c)) < 0)
            return ret;
    }

    /* F. Loop filter type */
    if (!s->profile)
        s->filter.simple = vp8_rac_get(c);

    /* G. DCT coefficient ordering specification */
    if (vp8_rac_get(c))
        for (i = 1; i < 16; i++)
            s->prob[0].scan[i] = zigzag_scan[vp8_rac_get_uint(c, 4)];

    /* H. Loop filter levels */
    if (s->profile > 0)
        s->filter.simple = vp8_rac_get(c);
    s->filter.level = vp8_rac_get_uint(c, 6);
    s->filter.sharpness = vp8_rac_get_uint(c, 3);

    /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
    /* NOTE(review): the probability-table update call was lost in
     * extraction here. */

    s->mbskip_enabled = 0;

    /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
    if (!s->keyframe) {
        s->prob->intra = vp8_rac_get_uint(c, 8);
        s->prob->last = vp8_rac_get_uint(c, 8);
        /* NOTE(review): a statement was lost in extraction here. */
    }

    return 0;
}
645 
/* Parse the VP8 uncompressed + first-partition frame header (RFC 6386 9.x).
 * Returns 0 or a negative AVERROR.
 * NOTE(review): several statements were lost in this extraction; each gap is
 * marked inline — verify against upstream. */
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
    VP56RangeCoder *c = &s->c;
    int header_size, hscale, vscale, ret;
    int width = s->avctx->width;
    int height = s->avctx->height;

    if (buf_size < 3) {
        av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
        return AVERROR_INVALIDDATA;
    }

    s->keyframe = !(buf[0] & 1);
    s->profile = (buf[0]>>1) & 7;
    s->invisible = !(buf[0] & 0x10);
    header_size = AV_RL24(buf) >> 5;
    buf += 3;
    buf_size -= 3;

    if (s->profile > 3)
        av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);

    if (!s->profile)
        /* NOTE(review): the sixtap put_pixels memcpy head was lost in
         * extraction; only the sizeof continuation survived. */
               sizeof(s->put_pixels_tab));
    else // profile 1-3 use bilinear, 4+ aren't defined so whatever
        /* NOTE(review): the bilinear put_pixels memcpy head was lost in
         * extraction here as well. */
               sizeof(s->put_pixels_tab));

    if (header_size > buf_size - 7 * s->keyframe) {
        av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->keyframe) {
        if (AV_RL24(buf) != 0x2a019d) {
            /* NOTE(review): the av_log head of this error message was lost
             * in extraction. */
                   "Invalid start code 0x%x\n", AV_RL24(buf));
            return AVERROR_INVALIDDATA;
        }
        width = AV_RL16(buf + 3) & 0x3fff;
        height = AV_RL16(buf + 5) & 0x3fff;
        hscale = buf[4] >> 6;
        vscale = buf[6] >> 6;
        buf += 7;
        buf_size -= 7;

        if (hscale || vscale)
            avpriv_request_sample(s->avctx, "Upscaling");

        /* NOTE(review): the reference-update resets, probability reset and
         * the pred16x16/pred8x8c memcpy heads were lost in extraction. */
               sizeof(s->prob->pred16x16));
               sizeof(s->prob->pred8x8c));
        memcpy(s->prob->mvc, vp8_mv_default_prob,
               sizeof(s->prob->mvc));
        memset(&s->segmentation, 0, sizeof(s->segmentation));
        memset(&s->lf_delta, 0, sizeof(s->lf_delta));
    }

    ret = ff_vp56_init_range_decoder(c, buf, header_size);
    if (ret < 0)
        return ret;
    buf += header_size;
    buf_size -= header_size;

    if (s->keyframe) {
        s->colorspace = vp8_rac_get(c);
        if (s->colorspace)
            av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
        s->fullrange = vp8_rac_get(c);
    }

    if ((s->segmentation.enabled = vp8_rac_get(c)))
        /* NOTE(review): the segment-info parsing call was lost in
         * extraction here. */
    else
        s->segmentation.update_map = 0; // FIXME: move this to some init function?

    s->filter.simple = vp8_rac_get(c);
    s->filter.level = vp8_rac_get_uint(c, 6);
    s->filter.sharpness = vp8_rac_get_uint(c, 3);

    if ((s->lf_delta.enabled = vp8_rac_get(c)))
        if (vp8_rac_get(c))
            update_lf_deltas(s);

    if (setup_partitions(s, buf, buf_size)) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
        return AVERROR_INVALIDDATA;
    }

    if (!s->macroblocks_base || /* first frame */
        width != s->avctx->width || height != s->avctx->height ||
        (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
        if ((ret = vp8_update_dimensions(s, width, height)) < 0)
            return ret;

    vp8_get_quants(s);

    if (!s->keyframe) {
        update_refs(s);
        /* NOTE(review): the golden sign-bias read was lost in extraction. */
        s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
    }

    // if we aren't saving this frame's probabilities for future frames,
    // make a copy of the current probabilities
    if (!(s->update_probabilities = vp8_rac_get(c)))
        s->prob[1] = s->prob[0];

    s->update_last = s->keyframe || vp8_rac_get(c);

    /* NOTE(review): the token probability-table update call was lost in
     * extraction here. */

    if ((s->mbskip_enabled = vp8_rac_get(c)))
        s->prob->mbskip = vp8_rac_get_uint(c, 8);

    if (!s->keyframe) {
        s->prob->intra = vp8_rac_get_uint(c, 8);
        s->prob->last = vp8_rac_get_uint(c, 8);
        s->prob->golden = vp8_rac_get_uint(c, 8);
        /* NOTE(review): the pred/mvc probability update call was lost in
         * extraction here. */
    }

    return 0;
}
774 
775 static av_always_inline
776 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
777 {
778  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
779  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
780  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
781  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
782 }
783 
784 /**
785  * Motion vector coding, 17.1.
786  */
{
    /* NOTE(review): signature line lost in extraction — this is
     * read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7),
     * returning a signed MV component per spec section 17.1. */
    int bit, x = 0;

    if (vp56_rac_get_prob_branchy(c, p[0])) {
        /* Large magnitude: read bits 0..2 ascending, then the high bits
         * descending (VP7 uses 8-bit, VP8 10-bit magnitudes). */
        int i;

        for (i = 0; i < 3; i++)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        for (i = (vp7 ? 7 : 9); i > 3; i--)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        /* Bit 3 is implicit unless all higher bits are clear. */
        if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
            x += 8;
    } else {
        // small_mvtree
        const uint8_t *ps = p + 2;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + 3 * bit;
        x += 4 * bit;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + bit;
        x += 2 * bit;
        x += vp56_rac_get_prob(c, *ps);
    }

    /* Sign bit is only coded for nonzero magnitudes. */
    return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
}
814 
{
    /* VP7 wrapper around read_mv_component().
     * NOTE(review): signature line lost in extraction. */
    return read_mv_component(c, p, 1);
}
819 
{
    /* VP8 wrapper around read_mv_component().
     * NOTE(review): signature line lost in extraction. */
    return read_mv_component(c, p, 0);
}
824 
825 static av_always_inline
826 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
827 {
828  if (is_vp7)
829  return vp7_submv_prob;
830 
831  if (left == top)
832  return vp8_submv_prob[4 - !!left];
833  if (!top)
834  return vp8_submv_prob[2];
835  return vp8_submv_prob[1 - !!left];
836 }
837 
838 /**
839  * Split motion vector prediction, 16.4.
840  * @returns the number of motion vectors parsed (2, 4 or 16)
841  */
static av_always_inline
/* NOTE(review): the first signature line was lost in extraction — per its
 * callers this is decode_splitmvs(VP8Context *s, VP56RangeCoder *c,
 * VP8Macroblock *mb, int layout, int is_vp7). */
                    int layout, int is_vp7)
{
    int part_idx;
    int n, num;
    VP8Macroblock *top_mb;
    VP8Macroblock *left_mb = &mb[-1];
    const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
    const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
    VP56mv *top_mv;
    VP56mv *left_mv = left_mb->bmv;
    VP56mv *cur_mv = mb->bmv;

    if (!layout) // layout is inlined, s->mb_layout is not
        top_mb = &mb[2];
    else
        top_mb = &mb[-s->mb_width - 1];
    mbsplits_top = vp8_mbsplits[top_mb->partitioning];
    top_mv = top_mb->bmv;

    /* NOTE(review): the mbsplit-tree reads selecting 16x8/8x16/8x8 were lost
     * in extraction; the dangling else below belongs to that lost branch. */
        else
            part_idx = VP8_SPLITMVMODE_8x8;
    } else {
        part_idx = VP8_SPLITMVMODE_4x4;
    }

    num = vp8_mbsplit_count[part_idx];
    mbsplits_cur = vp8_mbsplits[part_idx],
    firstidx = vp8_mbfirstidx[part_idx];
    mb->partitioning = part_idx;

    for (n = 0; n < num; n++) {
        int k = firstidx[n];
        uint32_t left, above;
        const uint8_t *submv_prob;

        /* Neighbour vectors come from the left/top MB on block edges, and
         * from already-decoded blocks of this MB otherwise. */
        if (!(k & 3))
            left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
        else
            left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
        if (k <= 3)
            above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
        else
            above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);

        submv_prob = get_submv_prob(left, above, is_vp7);

        if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
            if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
                if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
                    /* NEW4X4: explicit residual on top of the MB vector. */
                    mb->bmv[n].y = mb->mv.y +
                                   read_mv_component(c, s->prob->mvc[0], is_vp7);
                    mb->bmv[n].x = mb->mv.x +
                                   read_mv_component(c, s->prob->mvc[1], is_vp7);
                } else {
                    AV_ZERO32(&mb->bmv[n]);
                }
            } else {
                AV_WN32A(&mb->bmv[n], above);
            }
        } else {
            AV_WN32A(&mb->bmv[n], left);
        }
    }

    return num;
}
913 
914 /**
915  * The vp7 reference decoder uses a padding macroblock column (added to right
916  * edge of the frame) to guard against illegal macroblock offsets. The
917  * algorithm has bugs that permit offsets to straddle the padding column.
918  * This function replicates those bugs.
919  *
920  * @param[out] edge_x macroblock x address
921  * @param[out] edge_y macroblock y address
922  *
923  * @return macroblock offset legal (boolean)
924  */
/**
 * The vp7 reference decoder pads the frame with one extra macroblock column
 * on the right; offsets landing in that padding column (or before the
 * boundary) are illegal. This reproduces the reference decoder's arithmetic
 * exactly, bugs included.
 *
 * @param[out] edge_x macroblock x address (written only on success)
 * @param[out] edge_y macroblock y address (written only on success)
 * @return 1 if the offset is legal, 0 otherwise
 */
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
                                   int xoffset, int yoffset, int boundary,
                                   int *edge_x, int *edge_y)
{
    int stride = mb_width + 1;                 /* width incl. padding column */
    int pos    = (mb_y + yoffset) * stride + mb_x + xoffset;

    /* Reject positions before the boundary or inside the padding column. */
    if (pos < boundary || pos % stride == stride - 1)
        return 0;

    *edge_y = pos / stride;
    *edge_x = pos % stride;
    return 1;
}
937 
938 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
939 {
940  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
941 }
942 
static av_always_inline
/* NOTE(review): the first signature line was lost in extraction — per the
 * VP7 prediction logic this is vp7_decode_mvs(VP8Context *s,
 * VP8Macroblock *mb, int mb_x, int mb_y, int layout). */
                   int mb_x, int mb_y, int layout)
{
    VP8Macroblock *mb_edge[12];
    enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
    enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
    int idx = CNT_ZERO;
    VP56mv near_mv[3];
    uint8_t cnt[3] = { 0 };
    VP56RangeCoder *c = &s->c;
    int i;

    AV_ZERO32(&near_mv[0]);
    AV_ZERO32(&near_mv[1]);
    AV_ZERO32(&near_mv[2]);

    /* Scan the VP7 candidate list, scoring zero / nearest / near vectors. */
    for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
        const VP7MVPred * pred = &vp7_mv_pred[i];
        int edge_x, edge_y;

        if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
                                    pred->yoffset, !s->profile, &edge_x, &edge_y)) {
            VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
                                             ? s->macroblocks_base + 1 + edge_x +
                                               (s->mb_width + 1) * (edge_y + 1)
                                             : s->macroblocks + edge_x +
                                               (s->mb_height - edge_y - 1) * 2;
            uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
            if (mv) {
                if (AV_RN32A(&near_mv[CNT_NEAREST])) {
                    if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
                        idx = CNT_NEAREST;
                    } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
                        if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
                            continue;
                        idx = CNT_NEAR;
                    } else {
                        AV_WN32A(&near_mv[CNT_NEAR], mv);
                        idx = CNT_NEAR;
                    }
                } else {
                    AV_WN32A(&near_mv[CNT_NEAREST], mv);
                    idx = CNT_NEAREST;
                }
            } else {
                idx = CNT_ZERO;
            }
        } else {
            idx = CNT_ZERO;
        }
        cnt[idx] += vp7_mv_pred[i].score;
    }

    /* NOTE(review): a statement was lost in extraction here (presumably
     * resetting mb->partitioning before mode selection — verify). */

    if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
        mb->mode = VP8_MVMODE_MV;

        if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {

            if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {

                /* Base vector: best of {0,0, nearest/near} by score. */
                if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
                    AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
                else
                    AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));

                if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
                    mb->mode = VP8_MVMODE_SPLIT;
                    mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
                } else {
                    mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
                    mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
                    mb->bmv[0] = mb->mv;
                }
            } else {
                mb->mv = near_mv[CNT_NEAR];
                mb->bmv[0] = mb->mv;
            }
        } else {
            mb->mv = near_mv[CNT_NEAREST];
            mb->bmv[0] = mb->mv;
        }
    } else {
        mb->mode = VP8_MVMODE_ZERO;
        AV_ZERO32(&mb->mv);
        mb->bmv[0] = mb->mv;
    }
}
1033 
static av_always_inline
/* NOTE(review): the first signature line was lost in extraction — per the
 * VP8 prediction logic this is vp8_decode_mvs(VP8Context *s,
 * VP8Macroblock *mb, int mb_x, int mb_y, int layout). */
                   int mb_x, int mb_y, int layout)
{
    VP8Macroblock *mb_edge[3] = { 0 /* top */,
                                  mb - 1 /* left */,
                                  0 /* top-left */ };
    enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
    int idx = CNT_ZERO;
    int cur_sign_bias = s->sign_bias[mb->ref_frame];
    int8_t *sign_bias = s->sign_bias;
    VP56mv near_mv[4];
    uint8_t cnt[4] = { 0 };
    VP56RangeCoder *c = &s->c;

    if (!layout) { // layout is inlined (s->mb_layout is not)
        mb_edge[0] = mb + 2;
        mb_edge[2] = mb + 1;
    } else {
        mb_edge[0] = mb - s->mb_width - 1;
        mb_edge[2] = mb - s->mb_width - 2;
    }

    AV_ZERO32(&near_mv[0]);
    AV_ZERO32(&near_mv[1]);
    AV_ZERO32(&near_mv[2]);

    /* Process MB on top, left and top-left */
#define MV_EDGE_CHECK(n)                                      \
    {                                                         \
        VP8Macroblock *edge = mb_edge[n];                     \
        int edge_ref = edge->ref_frame;                       \
        if (edge_ref != VP56_FRAME_CURRENT) {                 \
            uint32_t mv = AV_RN32A(&edge->mv);                \
            if (mv) {                                         \
                if (cur_sign_bias != sign_bias[edge_ref]) {   \
                    /* SWAR negate of the values in mv. */    \
                    mv = ~mv;                                 \
                    mv = ((mv & 0x7fff7fff) +                 \
                          0x00010001) ^ (mv & 0x80008000);    \
                }                                             \
                if (!n || mv != AV_RN32A(&near_mv[idx]))      \
                    AV_WN32A(&near_mv[++idx], mv);            \
                cnt[idx] += 1 + (n != 2);                     \
            } else                                            \
                cnt[CNT_ZERO] += 1 + (n != 2);                \
        }                                                     \
    }

    MV_EDGE_CHECK(0)
    MV_EDGE_CHECK(1)
    MV_EDGE_CHECK(2)

    /* NOTE(review): a statement was lost in extraction here (presumably
     * resetting mb->partitioning before mode selection — verify). */
    if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
        mb->mode = VP8_MVMODE_MV;

        /* If we have three distinct MVs, merge first and last if they're the same */
        if (cnt[CNT_SPLITMV] &&
            AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
            cnt[CNT_NEAREST] += 1;

        /* Swap near and nearest if necessary */
        if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
            FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
            FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
        }

        if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
            if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
                /* Choose the best mv out of 0,0 and the nearest mv */
                clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
                cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
                                    (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
                                    (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);

                if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
                    mb->mode = VP8_MVMODE_SPLIT;
                    mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
                } else {
                    mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
                    mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
                    mb->bmv[0] = mb->mv;
                }
            } else {
                clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
                mb->bmv[0] = mb->mv;
            }
        } else {
            clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
            mb->bmv[0] = mb->mv;
        }
    } else {
        mb->mode = VP8_MVMODE_ZERO;
        AV_ZERO32(&mb->mv);
        mb->bmv[0] = mb->mv;
    }
}
1133 
1134 static av_always_inline
1136  int mb_x, int keyframe, int layout)
1137 {
1138  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1139 
1140  if (layout) {
1141  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1142  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1143  }
1144  if (keyframe) {
1145  int x, y;
1146  uint8_t *top;
1147  uint8_t *const left = s->intra4x4_pred_mode_left;
1148  if (layout)
1149  top = mb->intra4x4_pred_mode_top;
1150  else
1151  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1152  for (y = 0; y < 4; y++) {
1153  for (x = 0; x < 4; x++) {
1154  const uint8_t *ctx;
1155  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1156  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1157  left[y] = top[x] = *intra4x4;
1158  intra4x4++;
1159  }
1160  }
1161  } else {
1162  int i;
1163  for (i = 0; i < 16; i++)
1164  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
1166  }
1167 }
1168 
1169 static av_always_inline
1170 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1171  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1172 {
1173  VP56RangeCoder *c = &s->c;
1174  const char *vp7_feature_name[] = { "q-index",
1175  "lf-delta",
1176  "partial-golden-update",
1177  "blit-pitch" };
1178  if (is_vp7) {
1179  int i;
1180  *segment = 0;
1181  for (i = 0; i < 4; i++) {
1182  if (s->feature_enabled[i]) {
1185  s->feature_index_prob[i]);
1187  "Feature %s present in macroblock (value 0x%x)\n",
1188  vp7_feature_name[i], s->feature_value[i][index]);
1189  }
1190  }
1191  }
1192  } else if (s->segmentation.update_map) {
1193  int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
1194  *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
1195  } else if (s->segmentation.enabled)
1196  *segment = ref ? *ref : *segment;
1197  mb->segment = *segment;
1198 
1199  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1200 
1201  if (s->keyframe) {
1204 
1205  if (mb->mode == MODE_I4x4) {
1206  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1207  } else {
1208  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1209  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1210  if (s->mb_layout)
1211  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1212  else
1213  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1214  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1215  }
1216 
1220  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1221  // inter MB, 16.2
1222  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1223  mb->ref_frame =
1224  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
1226  else
1228  s->ref_count[mb->ref_frame - 1]++;
1229 
1230  // motion vectors, 16.3
1231  if (is_vp7)
1232  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1233  else
1234  vp8_decode_mvs(s, mb, mb_x, mb_y, layout);
1235  } else {
1236  // intra MB, 16.1
1238 
1239  if (mb->mode == MODE_I4x4)
1240  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1241 
1243  s->prob->pred8x8c);
1246  AV_ZERO32(&mb->bmv[0]);
1247  }
1248 }
1249 
1250 /**
1251  * @param r arithmetic bitstream reader context
1252  * @param block destination for block coefficients
1253  * @param probs probabilities to use when reading trees from the bitstream
1254  * @param i initial coeff index, 0 unless a separate DC block is coded
1255  * @param qmul array holding the dc/ac dequant factor at position 0/1
1256  *
1257  * @return 0 if no coeffs were decoded
1258  * otherwise, the index of the last coeff decoded plus one
1259  */
1260 static av_always_inline
1262  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1263  int i, uint8_t *token_prob, int16_t qmul[2],
1264  const uint8_t scan[16], int vp7)
1265 {
1266  VP56RangeCoder c = *r;
1267  goto skip_eob;
1268  do {
1269  int coeff;
1270 restart:
1271  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1272  break;
1273 
1274 skip_eob:
1275  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1276  if (++i == 16)
1277  break; // invalid input; blocks should end with EOB
1278  token_prob = probs[i][0];
1279  if (vp7)
1280  goto restart;
1281  goto skip_eob;
1282  }
1283 
1284  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1285  coeff = 1;
1286  token_prob = probs[i + 1][1];
1287  } else {
1288  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1289  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1290  if (coeff)
1291  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1292  coeff += 2;
1293  } else {
1294  // DCT_CAT*
1295  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1296  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1297  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1298  } else { // DCT_CAT2
1299  coeff = 7;
1300  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1301  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1302  }
1303  } else { // DCT_CAT3 and up
1304  int a = vp56_rac_get_prob(&c, token_prob[8]);
1305  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1306  int cat = (a << 1) + b;
1307  coeff = 3 + (8 << cat);
1308  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1309  }
1310  }
1311  token_prob = probs[i + 1][2];
1312  }
1313  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1314  } while (++i < 16);
1315 
1316  *r = c;
1317  return i;
1318 }
1319 
1320 static av_always_inline
1321 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1322 {
1323  int16_t dc = block[0];
1324  int ret = 0;
1325 
1326  if (pred[1] > 3) {
1327  dc += pred[0];
1328  ret = 1;
1329  }
1330 
1331  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1332  block[0] = pred[0] = dc;
1333  pred[1] = 0;
1334  } else {
1335  if (pred[0] == dc)
1336  pred[1]++;
1337  block[0] = pred[0] = dc;
1338  }
1339 
1340  return ret;
1341 }
1342 
1344  int16_t block[16],
1345  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1346  int i, uint8_t *token_prob,
1347  int16_t qmul[2],
1348  const uint8_t scan[16])
1349 {
1350  return decode_block_coeffs_internal(r, block, probs, i,
1351  token_prob, qmul, scan, IS_VP7);
1352 }
1353 
1354 #ifndef vp8_decode_block_coeffs_internal
1356  int16_t block[16],
1357  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1358  int i, uint8_t *token_prob,
1359  int16_t qmul[2])
1360 {
1361  return decode_block_coeffs_internal(r, block, probs, i,
1362  token_prob, qmul, zigzag_scan, IS_VP8);
1363 }
1364 #endif
1365 
1366 /**
1367  * @param c arithmetic bitstream reader context
1368  * @param block destination for block coefficients
1369  * @param probs probabilities to use when reading trees from the bitstream
1370  * @param i initial coeff index, 0 unless a separate DC block is coded
1371  * @param zero_nhood the initial prediction context for number of surrounding
1372  * all-zero blocks (only left/top, so 0-2)
1373  * @param qmul array holding the dc/ac dequant factor at position 0/1
1374  * @param scan scan pattern (VP7 only)
1375  *
1376  * @return 0 if no coeffs were decoded
1377  * otherwise, the index of the last coeff decoded plus one
1378  */
1379 static av_always_inline
1381  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1382  int i, int zero_nhood, int16_t qmul[2],
1383  const uint8_t scan[16], int vp7)
1384 {
1385  uint8_t *token_prob = probs[i][zero_nhood];
1386  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1387  return 0;
1388  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1389  token_prob, qmul, scan)
1390  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1391  token_prob, qmul);
1392 }
1393 
1394 static av_always_inline
1396  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1397  int is_vp7)
1398 {
1399  int i, x, y, luma_start = 0, luma_ctx = 3;
1400  int nnz_pred, nnz, nnz_total = 0;
1401  int segment = mb->segment;
1402  int block_dc = 0;
1403 
1404  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1405  nnz_pred = t_nnz[8] + l_nnz[8];
1406 
1407  // decode DC values and do hadamard
1408  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1409  nnz_pred, s->qmat[segment].luma_dc_qmul,
1410  zigzag_scan, is_vp7);
1411  l_nnz[8] = t_nnz[8] = !!nnz;
1412 
1413  if (is_vp7 && mb->mode > MODE_I4x4) {
1414  nnz |= inter_predict_dc(td->block_dc,
1415  s->inter_dc_pred[mb->ref_frame - 1]);
1416  }
1417 
1418  if (nnz) {
1419  nnz_total += nnz;
1420  block_dc = 1;
1421  if (nnz == 1)
1422  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1423  else
1424  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1425  }
1426  luma_start = 1;
1427  luma_ctx = 0;
1428  }
1429 
1430  // luma blocks
1431  for (y = 0; y < 4; y++)
1432  for (x = 0; x < 4; x++) {
1433  nnz_pred = l_nnz[y] + t_nnz[x];
1434  nnz = decode_block_coeffs(c, td->block[y][x],
1435  s->prob->token[luma_ctx],
1436  luma_start, nnz_pred,
1437  s->qmat[segment].luma_qmul,
1438  s->prob[0].scan, is_vp7);
1439  /* nnz+block_dc may be one more than the actual last index,
1440  * but we don't care */
1441  td->non_zero_count_cache[y][x] = nnz + block_dc;
1442  t_nnz[x] = l_nnz[y] = !!nnz;
1443  nnz_total += nnz;
1444  }
1445 
1446  // chroma blocks
1447  // TODO: what to do about dimensions? 2nd dim for luma is x,
1448  // but for chroma it's (y<<1)|x
1449  for (i = 4; i < 6; i++)
1450  for (y = 0; y < 2; y++)
1451  for (x = 0; x < 2; x++) {
1452  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1453  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1454  s->prob->token[2], 0, nnz_pred,
1455  s->qmat[segment].chroma_qmul,
1456  s->prob[0].scan, is_vp7);
1457  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1458  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1459  nnz_total += nnz;
1460  }
1461 
1462  // if there were no coded coeffs despite the macroblock not being marked skip,
1463  // we MUST not do the inner loop filter and should not do IDCT
1464  // Since skip isn't used for bitstream prediction, just manually set it.
1465  if (!nnz_total)
1466  mb->skip = 1;
1467 }
1468 
1469 static av_always_inline
1470 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1471  uint8_t *src_cb, uint8_t *src_cr,
1472  int linesize, int uvlinesize, int simple)
1473 {
1474  AV_COPY128(top_border, src_y + 15 * linesize);
1475  if (!simple) {
1476  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1477  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1478  }
1479 }
1480 
/* Exchange (xchg=1) or copy (xchg=0) the saved top-border pixels with the
 * row directly above the current macroblock, so intra prediction reads the
 * correct unfiltered neighbours; called once before prediction to install
 * the border and once after to restore the frame contents. */
static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
                    uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
                    int mb_y, int mb_width, int simple, int xchg)
{
    uint8_t *top_border_m1 = top_border - 32;     // for TL prediction
    /* step to the row above the macroblock */
    src_y -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a, b, xchg) \
    do { \
        if (xchg) \
            AV_SWAP64(b, a); \
        else \
            AV_COPY64(b, a); \
    } while (0)

    XCHG(top_border_m1 + 8, src_y - 8, xchg);
    XCHG(top_border, src_y, xchg);
    XCHG(top_border + 8, src_y + 8, 1);
    if (mb_x < mb_width - 1)
        XCHG(top_border + 32, src_y + 16, 1);

    // only copy chroma for normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1 + 16, src_cb - 8, xchg);
        XCHG(top_border_m1 + 24, src_cr - 8, xchg);
        XCHG(top_border + 16, src_cb, 1);
        XCHG(top_border + 24, src_cr, 1);
    }
}
1514 
1515 static av_always_inline
1516 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1517 {
1518  if (!mb_x)
1519  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1520  else
1521  return mb_y ? mode : LEFT_DC_PRED8x8;
1522 }
1523 
1524 static av_always_inline
1525 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1526 {
1527  if (!mb_x)
1528  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1529  else
1530  return mb_y ? mode : HOR_PRED8x8;
1531 }
1532 
1533 static av_always_inline
1534 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1535 {
1536  switch (mode) {
1537  case DC_PRED8x8:
1538  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1539  case VERT_PRED8x8:
1540  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1541  case HOR_PRED8x8:
1542  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1543  case PLANE_PRED8x8: /* TM */
1544  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1545  }
1546  return mode;
1547 }
1548 
1549 static av_always_inline
1550 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1551 {
1552  if (!mb_x) {
1553  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1554  } else {
1555  return mb_y ? mode : HOR_VP8_PRED;
1556  }
1557 }
1558 
1559 static av_always_inline
1560 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1561  int *copy_buf, int vp7)
1562 {
1563  switch (mode) {
1564  case VERT_PRED:
1565  if (!mb_x && mb_y) {
1566  *copy_buf = 1;
1567  return mode;
1568  }
1569  /* fall-through */
1570  case DIAG_DOWN_LEFT_PRED:
1571  case VERT_LEFT_PRED:
1572  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1573  case HOR_PRED:
1574  if (!mb_y) {
1575  *copy_buf = 1;
1576  return mode;
1577  }
1578  /* fall-through */
1579  case HOR_UP_PRED:
1580  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1581  case TM_VP8_PRED:
1582  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1583  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1584  * as 16x16/8x8 DC */
1585  case DIAG_DOWN_RIGHT_PRED:
1586  case VERT_RIGHT_PRED:
1587  case HOR_DOWN_PRED:
1588  if (!mb_y || !mb_x)
1589  *copy_buf = 1;
1590  return mode;
1591  }
1592  return mode;
1593 }
1594 
1595 static av_always_inline
1597  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1598 {
1599  int x, y, mode, nnz;
1600  uint32_t tr;
1601 
1602  /* for the first row, we need to run xchg_mb_border to init the top edge
1603  * to 127 otherwise, skip it if we aren't going to deblock */
1604  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1605  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1606  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1607  s->filter.simple, 1);
1608 
1609  if (mb->mode < MODE_I4x4) {
1610  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1611  s->hpc.pred16x16[mode](dst[0], s->linesize);
1612  } else {
1613  uint8_t *ptr = dst[0];
1614  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1615  const uint8_t lo = is_vp7 ? 128 : 127;
1616  const uint8_t hi = is_vp7 ? 128 : 129;
1617  uint8_t tr_top[4] = { lo, lo, lo, lo };
1618 
1619  // all blocks on the right edge of the macroblock use bottom edge
1620  // the top macroblock for their topright edge
1621  uint8_t *tr_right = ptr - s->linesize + 16;
1622 
1623  // if we're on the right edge of the frame, said edge is extended
1624  // from the top macroblock
1625  if (mb_y && mb_x == s->mb_width - 1) {
1626  tr = tr_right[-1] * 0x01010101u;
1627  tr_right = (uint8_t *) &tr;
1628  }
1629 
1630  if (mb->skip)
1632 
1633  for (y = 0; y < 4; y++) {
1634  uint8_t *topright = ptr + 4 - s->linesize;
1635  for (x = 0; x < 4; x++) {
1636  int copy = 0, linesize = s->linesize;
1637  uint8_t *dst = ptr + 4 * x;
1638  LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);
1639 
1640  if ((y == 0 || x == 3) && mb_y == 0) {
1641  topright = tr_top;
1642  } else if (x == 3)
1643  topright = tr_right;
1644 
1645  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1646  mb_y + y, &copy, is_vp7);
1647  if (copy) {
1648  dst = copy_dst + 12;
1649  linesize = 8;
1650  if (!(mb_y + y)) {
1651  copy_dst[3] = lo;
1652  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1653  } else {
1654  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1655  if (!(mb_x + x)) {
1656  copy_dst[3] = hi;
1657  } else {
1658  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1659  }
1660  }
1661  if (!(mb_x + x)) {
1662  copy_dst[11] =
1663  copy_dst[19] =
1664  copy_dst[27] =
1665  copy_dst[35] = hi;
1666  } else {
1667  copy_dst[11] = ptr[4 * x - 1];
1668  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1669  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1670  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1671  }
1672  }
1673  s->hpc.pred4x4[mode](dst, topright, linesize);
1674  if (copy) {
1675  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1676  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1677  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1678  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1679  }
1680 
1681  nnz = td->non_zero_count_cache[y][x];
1682  if (nnz) {
1683  if (nnz == 1)
1684  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1685  td->block[y][x], s->linesize);
1686  else
1687  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1688  td->block[y][x], s->linesize);
1689  }
1690  topright += 4;
1691  }
1692 
1693  ptr += 4 * s->linesize;
1694  intra4x4 += 4;
1695  }
1696  }
1697 
1699  mb_x, mb_y, is_vp7);
1700  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1701  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1702 
1703  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1704  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1705  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1706  s->filter.simple, 0);
1707 }
1708 
/* Edge-extension lookup tables indexed by the 3-bit subpel position of a
 * motion-vector component; row [0] also selects the MC function variant. */
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
1715 
1716 /**
1717  * luma MC function
1718  *
1719  * @param s VP8 decoding context
1720  * @param dst target buffer for block data at block position
1721  * @param ref reference picture buffer at origin (0, 0)
1722  * @param mv motion vector (relative to block position) to get pixel data from
1723  * @param x_off horizontal position of block from origin (0, 0)
1724  * @param y_off vertical position of block from origin (0, 0)
1725  * @param block_w width of block (16, 8 or 4)
1726  * @param block_h height of block (always same as block_w)
1727  * @param width width of src/dst plane data
1728  * @param height height of src/dst plane data
1729  * @param linesize size of a single line of plane data, including padding
1730  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1731  */
1732 static av_always_inline
1734  ThreadFrame *ref, const VP56mv *mv,
1735  int x_off, int y_off, int block_w, int block_h,
1736  int width, int height, ptrdiff_t linesize,
1737  vp8_mc_func mc_func[3][3])
1738 {
1739  uint8_t *src = ref->f->data[0];
1740 
1741  if (AV_RN32A(mv)) {
1742  int src_linesize = linesize;
1743 
1744  int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1745  int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1746 
1747  x_off += mv->x >> 2;
1748  y_off += mv->y >> 2;
1749 
1750  // edge emulation
1751  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1752  src += y_off * linesize + x_off;
1753  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1754  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1756  src - my_idx * linesize - mx_idx,
1757  EDGE_EMU_LINESIZE, linesize,
1758  block_w + subpel_idx[1][mx],
1759  block_h + subpel_idx[1][my],
1760  x_off - mx_idx, y_off - my_idx,
1761  width, height);
1762  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1763  src_linesize = EDGE_EMU_LINESIZE;
1764  }
1765  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1766  } else {
1767  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1768  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1769  linesize, block_h, 0, 0);
1770  }
1771 }
1772 
1773 /**
1774  * chroma MC function
1775  *
1776  * @param s VP8 decoding context
1777  * @param dst1 target buffer for block data at block position (U plane)
1778  * @param dst2 target buffer for block data at block position (V plane)
1779  * @param ref reference picture buffer at origin (0, 0)
1780  * @param mv motion vector (relative to block position) to get pixel data from
1781  * @param x_off horizontal position of block from origin (0, 0)
1782  * @param y_off vertical position of block from origin (0, 0)
1783  * @param block_w width of block (16, 8 or 4)
1784  * @param block_h height of block (always same as block_w)
1785  * @param width width of src/dst plane data
1786  * @param height height of src/dst plane data
1787  * @param linesize size of a single line of plane data, including padding
1788  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1789  */
1790 static av_always_inline
1792  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1793  int x_off, int y_off, int block_w, int block_h,
1794  int width, int height, ptrdiff_t linesize,
1795  vp8_mc_func mc_func[3][3])
1796 {
1797  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1798 
1799  if (AV_RN32A(mv)) {
1800  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1801  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1802 
1803  x_off += mv->x >> 3;
1804  y_off += mv->y >> 3;
1805 
1806  // edge emulation
1807  src1 += y_off * linesize + x_off;
1808  src2 += y_off * linesize + x_off;
1809  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1810  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1811  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1813  src1 - my_idx * linesize - mx_idx,
1814  EDGE_EMU_LINESIZE, linesize,
1815  block_w + subpel_idx[1][mx],
1816  block_h + subpel_idx[1][my],
1817  x_off - mx_idx, y_off - my_idx, width, height);
1818  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1819  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1820 
1822  src2 - my_idx * linesize - mx_idx,
1823  EDGE_EMU_LINESIZE, linesize,
1824  block_w + subpel_idx[1][mx],
1825  block_h + subpel_idx[1][my],
1826  x_off - mx_idx, y_off - my_idx, width, height);
1827  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1828  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1829  } else {
1830  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1831  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1832  }
1833  } else {
1834  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1835  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1836  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1837  }
1838 }
1839 
1840 static av_always_inline
1842  ThreadFrame *ref_frame, int x_off, int y_off,
1843  int bx_off, int by_off, int block_w, int block_h,
1844  int width, int height, VP56mv *mv)
1845 {
1846  VP56mv uvmv = *mv;
1847 
1848  /* Y */
1849  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1850  ref_frame, mv, x_off + bx_off, y_off + by_off,
1851  block_w, block_h, width, height, s->linesize,
1852  s->put_pixels_tab[block_w == 8]);
1853 
1854  /* U/V */
1855  if (s->profile == 3) {
1856  /* this block only applies VP8; it is safe to check
1857  * only the profile, as VP7 profile <= 1 */
1858  uvmv.x &= ~7;
1859  uvmv.y &= ~7;
1860  }
1861  x_off >>= 1;
1862  y_off >>= 1;
1863  bx_off >>= 1;
1864  by_off >>= 1;
1865  width >>= 1;
1866  height >>= 1;
1867  block_w >>= 1;
1868  block_h >>= 1;
1869  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1870  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1871  &uvmv, x_off + bx_off, y_off + by_off,
1872  block_w, block_h, width, height, s->uvlinesize,
1873  s->put_pixels_tab[1 + (block_w == 4)]);
1874 }
1875 
1876 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1877  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1878 static av_always_inline
1879 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1880  int mb_xy, int ref)
1881 {
1882  /* Don't prefetch refs that haven't been used very often this frame. */
1883  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1884  int x_off = mb_x << 4, y_off = mb_y << 4;
1885  int mx = (mb->mv.x >> 2) + x_off + 8;
1886  int my = (mb->mv.y >> 2) + y_off;
1887  uint8_t **src = s->framep[ref]->tf.f->data;
1888  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1889  /* For threading, a ff_thread_await_progress here might be useful, but
1890  * it actually slows down the decoder. Since a bad prefetch doesn't
1891  * generate bad decoder output, we don't run it here. */
1892  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1893  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1894  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1895  }
1896 }
1897 
1898 /**
1899  * Apply motion vectors to prediction buffer, chapter 18.
1900  */
1901 static av_always_inline
1903  VP8Macroblock *mb, int mb_x, int mb_y)
1904 {
1905  int x_off = mb_x << 4, y_off = mb_y << 4;
1906  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1907  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1908  VP56mv *bmv = mb->bmv;
1909 
1910  switch (mb->partitioning) {
1911  case VP8_SPLITMVMODE_NONE:
1912  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1913  0, 0, 16, 16, width, height, &mb->mv);
1914  break;
1915  case VP8_SPLITMVMODE_4x4: {
1916  int x, y;
1917  VP56mv uvmv;
1918 
1919  /* Y */
1920  for (y = 0; y < 4; y++) {
1921  for (x = 0; x < 4; x++) {
1922  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1923  ref, &bmv[4 * y + x],
1924  4 * x + x_off, 4 * y + y_off, 4, 4,
1925  width, height, s->linesize,
1926  s->put_pixels_tab[2]);
1927  }
1928  }
1929 
1930  /* U/V */
1931  x_off >>= 1;
1932  y_off >>= 1;
1933  width >>= 1;
1934  height >>= 1;
1935  for (y = 0; y < 2; y++) {
1936  for (x = 0; x < 2; x++) {
1937  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1938  mb->bmv[2 * y * 4 + 2 * x + 1].x +
1939  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1940  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1941  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1942  mb->bmv[2 * y * 4 + 2 * x + 1].y +
1943  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1944  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
1945  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
1946  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
1947  if (s->profile == 3) {
1948  uvmv.x &= ~7;
1949  uvmv.y &= ~7;
1950  }
1951  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
1952  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
1953  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1954  width, height, s->uvlinesize,
1955  s->put_pixels_tab[2]);
1956  }
1957  }
1958  break;
1959  }
1960  case VP8_SPLITMVMODE_16x8:
1961  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1962  0, 0, 16, 8, width, height, &bmv[0]);
1963  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1964  0, 8, 16, 8, width, height, &bmv[1]);
1965  break;
1966  case VP8_SPLITMVMODE_8x16:
1967  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1968  0, 0, 8, 16, width, height, &bmv[0]);
1969  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1970  8, 0, 8, 16, width, height, &bmv[1]);
1971  break;
1972  case VP8_SPLITMVMODE_8x8:
1973  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1974  0, 0, 8, 8, width, height, &bmv[0]);
1975  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1976  8, 0, 8, 8, width, height, &bmv[1]);
1977  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1978  0, 8, 8, 8, width, height, &bmv[2]);
1979  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1980  8, 8, 8, 8, width, height, &bmv[3]);
1981  break;
1982  }
1983 }
1984 
1985 static av_always_inline
1987 {
1988  int x, y, ch;
1989 
1990  if (mb->mode != MODE_I4x4) {
1991  uint8_t *y_dst = dst[0];
1992  for (y = 0; y < 4; y++) {
1993  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
1994  if (nnz4) {
1995  if (nnz4 & ~0x01010101) {
1996  for (x = 0; x < 4; x++) {
1997  if ((uint8_t) nnz4 == 1)
1998  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
1999  td->block[y][x],
2000  s->linesize);
2001  else if ((uint8_t) nnz4 > 1)
2002  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
2003  td->block[y][x],
2004  s->linesize);
2005  nnz4 >>= 8;
2006  if (!nnz4)
2007  break;
2008  }
2009  } else {
2010  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
2011  }
2012  }
2013  y_dst += 4 * s->linesize;
2014  }
2015  }
2016 
2017  for (ch = 0; ch < 2; ch++) {
2018  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
2019  if (nnz4) {
2020  uint8_t *ch_dst = dst[1 + ch];
2021  if (nnz4 & ~0x01010101) {
2022  for (y = 0; y < 2; y++) {
2023  for (x = 0; x < 2; x++) {
2024  if ((uint8_t) nnz4 == 1)
2025  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
2026  td->block[4 + ch][(y << 1) + x],
2027  s->uvlinesize);
2028  else if ((uint8_t) nnz4 > 1)
2029  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
2030  td->block[4 + ch][(y << 1) + x],
2031  s->uvlinesize);
2032  nnz4 >>= 8;
2033  if (!nnz4)
2034  goto chroma_idct_end;
2035  }
2036  ch_dst += 4 * s->uvlinesize;
2037  }
2038  } else {
2039  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
2040  }
2041  }
2042 chroma_idct_end:
2043  ;
2044  }
2045 }
2046 
2047 static av_always_inline
2049  VP8FilterStrength *f, int is_vp7)
2050 {
2051  int interior_limit, filter_level;
2052 
2053  if (s->segmentation.enabled) {
2054  filter_level = s->segmentation.filter_level[mb->segment];
2055  if (!s->segmentation.absolute_vals)
2056  filter_level += s->filter.level;
2057  } else
2058  filter_level = s->filter.level;
2059 
2060  if (s->lf_delta.enabled) {
2061  filter_level += s->lf_delta.ref[mb->ref_frame];
2062  filter_level += s->lf_delta.mode[mb->mode];
2063  }
2064 
2065  filter_level = av_clip_uintp2(filter_level, 6);
2066 
2067  interior_limit = filter_level;
2068  if (s->filter.sharpness) {
2069  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2070  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2071  }
2072  interior_limit = FFMAX(interior_limit, 1);
2073 
2074  f->filter_level = filter_level;
2075  f->inner_limit = interior_limit;
2076  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2077  mb->mode == VP8_MVMODE_SPLIT;
2078 }
2079 
2080 static av_always_inline
2082  int mb_x, int mb_y, int is_vp7)
2083 {
2084  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2085  int filter_level = f->filter_level;
2086  int inner_limit = f->inner_limit;
2087  int inner_filter = f->inner_filter;
2088  int linesize = s->linesize;
2089  int uvlinesize = s->uvlinesize;
2090  static const uint8_t hev_thresh_lut[2][64] = {
2091  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2092  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2093  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2094  3, 3, 3, 3 },
2095  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2096  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2097  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2098  2, 2, 2, 2 }
2099  };
2100 
2101  if (!filter_level)
2102  return;
2103 
2104  if (is_vp7) {
2105  bedge_lim_y = filter_level;
2106  bedge_lim_uv = filter_level * 2;
2107  mbedge_lim = filter_level + 2;
2108  } else {
2109  bedge_lim_y =
2110  bedge_lim_uv = filter_level * 2 + inner_limit;
2111  mbedge_lim = bedge_lim_y + 4;
2112  }
2113 
2114  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2115 
2116  if (mb_x) {
2117  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
2118  mbedge_lim, inner_limit, hev_thresh);
2119  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
2120  mbedge_lim, inner_limit, hev_thresh);
2121  }
2122 
2123 #define H_LOOP_FILTER_16Y_INNER(cond) \
2124  if (cond && inner_filter) { \
2125  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2126  bedge_lim_y, inner_limit, \
2127  hev_thresh); \
2128  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2129  bedge_lim_y, inner_limit, \
2130  hev_thresh); \
2131  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2132  bedge_lim_y, inner_limit, \
2133  hev_thresh); \
2134  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2135  uvlinesize, bedge_lim_uv, \
2136  inner_limit, hev_thresh); \
2137  }
2138 
2139  H_LOOP_FILTER_16Y_INNER(!is_vp7)
2140 
2141  if (mb_y) {
2142  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
2143  mbedge_lim, inner_limit, hev_thresh);
2144  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
2145  mbedge_lim, inner_limit, hev_thresh);
2146  }
2147 
2148  if (inner_filter) {
2149  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
2150  linesize, bedge_lim_y,
2151  inner_limit, hev_thresh);
2152  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
2153  linesize, bedge_lim_y,
2154  inner_limit, hev_thresh);
2155  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
2156  linesize, bedge_lim_y,
2157  inner_limit, hev_thresh);
2158  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
2159  dst[2] + 4 * uvlinesize,
2160  uvlinesize, bedge_lim_uv,
2161  inner_limit, hev_thresh);
2162  }
2163 
2164  H_LOOP_FILTER_16Y_INNER(is_vp7)
2165 }
2166 
2167 static av_always_inline
2169  int mb_x, int mb_y)
2170 {
2171  int mbedge_lim, bedge_lim;
2172  int filter_level = f->filter_level;
2173  int inner_limit = f->inner_limit;
2174  int inner_filter = f->inner_filter;
2175  int linesize = s->linesize;
2176 
2177  if (!filter_level)
2178  return;
2179 
2180  bedge_lim = 2 * filter_level + inner_limit;
2181  mbedge_lim = bedge_lim + 4;
2182 
2183  if (mb_x)
2184  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2185  if (inner_filter) {
2186  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2187  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2188  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2189  }
2190 
2191  if (mb_y)
2192  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2193  if (inner_filter) {
2194  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2195  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2196  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2197  }
2198 }
2199 
2200 #define MARGIN (16 << 2)
2201 static av_always_inline
2203  VP8Frame *prev_frame, int is_vp7)
2204 {
2205  VP8Context *s = avctx->priv_data;
2206  int mb_x, mb_y;
2207 
2208  s->mv_min.y = -MARGIN;
2209  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2210  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2212  ((s->mb_width + 1) * (mb_y + 1) + 1);
2213  int mb_xy = mb_y * s->mb_width;
2214 
2215  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2216 
2217  s->mv_min.x = -MARGIN;
2218  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2219  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2220  if (mb_y == 0)
2221  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2222  DC_PRED * 0x01010101);
2223  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2224  prev_frame && prev_frame->seg_map ?
2225  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
2226  s->mv_min.x -= 64;
2227  s->mv_max.x -= 64;
2228  }
2229  s->mv_min.y -= 64;
2230  s->mv_max.y -= 64;
2231  }
2232 }
2233 
2234 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2235  VP8Frame *prev_frame)
2236 {
2237  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2238 }
2239 
2240 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2241  VP8Frame *prev_frame)
2242 {
2243  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2244 }
2245 
2246 #if HAVE_THREADS
/* Sliced-threading synchronization helpers.
 *
 * Each VP8ThreadData publishes its decoding progress as a single packed
 * integer (mb_y << 16) | (mb_x & 0xFFFF) in thread_mb_pos, so a whole
 * position can be compared with one integer comparison.
 *
 * check_thread_pos(td, otd, mb_x_check, mb_y_check):
 * Block until the other thread 'otd' has reached at least macroblock
 * (mb_x_check, mb_y_check).  The fast path reads otd->thread_mb_pos
 * without taking the lock; the slow path records the awaited position in
 * td->wait_mb_pos (so the other thread knows someone is waiting on it),
 * then sleeps on otd->cond until the target is reached, and finally
 * resets wait_mb_pos to INT_MAX ("waiting for nothing").
 * NOTE(review): both macros reference caller locals (avctx, num_jobs,
 * next_td, prev_td) by name and must only be expanded inside the mb-row
 * decode/filter functions that declare them. */
2247 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2248  do { \
2249  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2250  if (otd->thread_mb_pos < tmp) { \
2251  pthread_mutex_lock(&otd->lock); \
2252  td->wait_mb_pos = tmp; \
2253  do { \
2254  if (otd->thread_mb_pos >= tmp) \
2255  break; \
2256  pthread_cond_wait(&otd->cond, &otd->lock); \
2257  } while (1); \
2258  td->wait_mb_pos = INT_MAX; \
2259  pthread_mutex_unlock(&otd->lock); \
2260  } \
2261  } while (0)
2262 
/* update_pos(td, mb_y, mb_x):
 * Publish this thread's new position.  The lock is taken and the condvar
 * broadcast only when sliced threading with more than one job is active
 * AND a neighbouring thread might actually be blocked on a position we
 * have now passed (pos_check); otherwise the plain store suffices. */
2263 #define update_pos(td, mb_y, mb_x) \
2264  do { \
2265  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2266  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2267  (num_jobs > 1); \
2268  int is_null = !next_td || !prev_td; \
2269  int pos_check = (is_null) ? 1 \
2270  : (next_td != td && \
2271  pos >= next_td->wait_mb_pos) || \
2272  (prev_td != td && \
2273  pos >= prev_td->wait_mb_pos); \
2274  td->thread_mb_pos = pos; \
2275  if (sliced_threading && pos_check) { \
2276  pthread_mutex_lock(&td->lock); \
2277  pthread_cond_broadcast(&td->cond); \
2278  pthread_mutex_unlock(&td->lock); \
2279  } \
2280  } while (0)
2281 #else
/* Threads disabled: both macros degrade to empty statements. */
2282 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2283 #define update_pos(td, mb_y, mb_x) while(0)
2284 #endif
2285 
2287  int jobnr, int threadnr, int is_vp7)
2288 {
2289  VP8Context *s = avctx->priv_data;
2290  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
2291  int mb_y = td->thread_mb_pos >> 16;
2292  int mb_x, mb_xy = mb_y * s->mb_width;
2293  int num_jobs = s->num_jobs;
2294  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
2295  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2296  VP8Macroblock *mb;
2297  uint8_t *dst[3] = {
2298  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2299  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2300  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2301  };
2302 
2303  if (vpX_rac_is_end(c))
2304  return AVERROR_INVALIDDATA;
2305 
2306  if (mb_y == 0)
2307  prev_td = td;
2308  else
2309  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2310  if (mb_y == s->mb_height - 1)
2311  next_td = td;
2312  else
2313  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2314  if (s->mb_layout == 1)
2315  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2316  else {
2317  // Make sure the previous frame has read its segmentation map,
2318  // if we re-use the same map.
2319  if (prev_frame && s->segmentation.enabled &&
2321  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2322  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2323  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2324  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2325  }
2326 
2327  if (!is_vp7 || mb_y == 0)
2328  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2329 
2330  s->mv_min.x = -MARGIN;
2331  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2332 
2333  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2334  if (vpX_rac_is_end(c))
2335  return AVERROR_INVALIDDATA;
2336  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2337  if (prev_td != td) {
2338  if (threadnr != 0) {
2339  check_thread_pos(td, prev_td,
2340  mb_x + (is_vp7 ? 2 : 1),
2341  mb_y - (is_vp7 ? 2 : 1));
2342  } else {
2343  check_thread_pos(td, prev_td,
2344  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2345  mb_y - (is_vp7 ? 2 : 1));
2346  }
2347  }
2348 
2349  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2350  s->linesize, 4);
2351  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2352  dst[2] - dst[1], 2);
2353 
2354  if (!s->mb_layout)
2355  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2356  prev_frame && prev_frame->seg_map ?
2357  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2358 
2359  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2360 
2361  if (!mb->skip)
2362  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2363 
2364  if (mb->mode <= MODE_I4x4)
2365  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2366  else
2367  inter_predict(s, td, dst, mb, mb_x, mb_y);
2368 
2369  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2370 
2371  if (!mb->skip) {
2372  idct_mb(s, td, dst, mb);
2373  } else {
2374  AV_ZERO64(td->left_nnz);
2375  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2376 
2377  /* Reset DC block predictors if they would exist
2378  * if the mb had coefficients */
2379  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2380  td->left_nnz[8] = 0;
2381  s->top_nnz[mb_x][8] = 0;
2382  }
2383  }
2384 
2385  if (s->deblock_filter)
2386  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2387 
2388  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2389  if (s->filter.simple)
2390  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2391  NULL, NULL, s->linesize, 0, 1);
2392  else
2393  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2394  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2395  }
2396 
2397  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2398 
2399  dst[0] += 16;
2400  dst[1] += 8;
2401  dst[2] += 8;
2402  s->mv_min.x -= 64;
2403  s->mv_max.x -= 64;
2404 
2405  if (mb_x == s->mb_width + 1) {
2406  update_pos(td, mb_y, s->mb_width + 3);
2407  } else {
2408  update_pos(td, mb_y, mb_x);
2409  }
2410  }
2411  return 0;
2412 }
2413 
2414 static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2415  int jobnr, int threadnr)
2416 {
2417  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2418 }
2419 
2420 static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2421  int jobnr, int threadnr)
2422 {
2423  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2424 }
2425 
/**
 * Apply the in-loop deblocking filter to one row of macroblocks.
 *
 * The row to process is taken from td->thread_mb_pos (upper 16 bits).
 * With sliced threading, progress of neighbouring rows is coordinated via
 * check_thread_pos()/update_pos(), which expand references to the locals
 * td, prev_td, next_td, num_jobs and avctx declared here — do not rename
 * them.
 */
2426 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
2427  int jobnr, int threadnr, int is_vp7)
2428 {
2429  VP8Context *s = avctx->priv_data;
2430  VP8ThreadData *td = &s->thread_data[threadnr];
2431  int mb_x, mb_y = td->thread_mb_pos >> 16, num_jobs = s->num_jobs;
2432  AVFrame *curframe = s->curframe->tf.f;
2433  VP8Macroblock *mb;
2434  VP8ThreadData *prev_td, *next_td;
     /* Per-plane pointers to the start of this mb row (luma rows are 16
      * pixels tall, chroma rows 8). */
2435  uint8_t *dst[3] = {
2436  curframe->data[0] + 16 * mb_y * s->linesize,
2437  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2438  curframe->data[2] + 8 * mb_y * s->uvlinesize
2439  };
2440 
     /* Locate this row's macroblock array; the base pointer layout depends
      * on s->mb_layout (see the corresponding selection in the decode
      * pass). */
2441  if (s->mb_layout == 1)
2442  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2443  else
2444  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2445 
     /* Threads owning the rows above/below us; first and last rows have no
      * such neighbour and point back at ourselves (which disables the
      * check_thread_pos calls via the prev_td != td / next_td != td
      * guards). */
2446  if (mb_y == 0)
2447  prev_td = td;
2448  else
2449  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2450  if (mb_y == s->mb_height - 1)
2451  next_td = td;
2452  else
2453  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2454 
2455  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2456  VP8FilterStrength *f = &td->filter_strength[mb_x];
     /* Filter progress is published offset by (mb_width + 3) columns —
      * presumably to keep it distinguishable from decode progress in the
      * same packed counter; see the matching update_pos below.
      * TODO(review): confirm against decode_mb_row_no_filter. */
2457  if (prev_td != td)
2458  check_thread_pos(td, prev_td,
2459  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2460  if (next_td != td)
2461  if (next_td != &s->thread_data[0])
2462  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2463 
     /* With a single job, the top-border backup was not done in the decode
      * pass, so do it here before filtering overwrites the pixels. */
2464  if (num_jobs == 1) {
2465  if (s->filter.simple)
2466  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2467  NULL, NULL, s->linesize, 0, 1);
2468  else
2469  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2470  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2471  }
2472 
     /* Simple filter touches luma only; the normal filter also processes
      * both chroma planes. */
2473  if (s->filter.simple)
2474  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2475  else
2476  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2477  dst[0] += 16;
2478  dst[1] += 8;
2479  dst[2] += 8;
2480 
2481  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2482  }
2483 }
2484 
2485 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2486  int jobnr, int threadnr)
2487 {
2488  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2489 }
2490 
2491 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2492  int jobnr, int threadnr)
2493 {
2494  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2495 }
2496 
/**
 * Worker entry for sliced decoding: decode (and optionally loop-filter)
 * every mb row assigned to this job, i.e. rows jobnr, jobnr + num_jobs,
 * jobnr + 2*num_jobs, ...
 *
 * Note: update_pos() expands references to the locals td, prev_td,
 * next_td, num_jobs and avctx declared below — do not rename them.
 *
 * @return 0 on success, a negative AVERROR on decode failure.
 */
2497 static av_always_inline
2498 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2499  int threadnr, int is_vp7)
2500 {
2501  VP8Context *s = avctx->priv_data;
2502  VP8ThreadData *td = &s->thread_data[jobnr];
     /* prev_td/next_td stay NULL here; update_pos treats that as "no
      * neighbour" (is_null) and always signals. */
2503  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2504  VP8Frame *curframe = s->curframe;
2505  int mb_y, num_jobs = s->num_jobs;
2506  int ret;
2507 
2508  td->thread_nr = threadnr;
2509  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
     /* Publish the row being worked on (packed position, x = 0). */
2510  td->thread_mb_pos = mb_y << 16;
2511  ret = s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2512  if (ret < 0) {
     /* On error, publish a final/terminal position so any thread blocked
      * in check_thread_pos on us is presumably released rather than
      * deadlocking — TODO(review): confirm. */
2513  update_pos(td, s->mb_height, INT_MAX & 0xFFFF);
2514  return ret;
2515  }
2516  if (s->deblock_filter)
2517  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
     /* Mark this row fully finished (x = max). */
2518  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2519 
     /* Slide the motion-vector clamping window down one row (64 = 16px
      * in the 1/4-pel units used by mv_min/mv_max). */
2520  s->mv_min.y -= 64;
2521  s->mv_max.y -= 64;
2522 
     /* With frame threading, let decoders of dependent frames know this
      * row of the reference is ready. */
2523  if (avctx->active_thread_type == FF_THREAD_FRAME)
2524  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2525  }
2526 
2527  return 0;
2528 }
2529 
2530 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2531  int jobnr, int threadnr)
2532 {
2533  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2534 }
2535 
2536 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2537  int jobnr, int threadnr)
2538 {
2539  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2540 }
2541 
2542 
2543 static av_always_inline
2544 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2545  AVPacket *avpkt, int is_vp7)
2546 {
2547  VP8Context *s = avctx->priv_data;
2548  int ret, i, referenced, num_jobs;
2549  enum AVDiscard skip_thresh;
2550  VP8Frame *av_uninit(curframe), *prev_frame;
2551 
2553 
2554  if (is_vp7)
2555  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2556  else
2557  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2558 
2559  if (ret < 0)
2560  goto err;
2561 
2562  prev_frame = s->framep[VP56_FRAME_CURRENT];
2563 
2564  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2566 
2567  skip_thresh = !referenced ? AVDISCARD_NONREF
2568  : !s->keyframe ? AVDISCARD_NONKEY
2569  : AVDISCARD_ALL;
2570 
2571  if (avctx->skip_frame >= skip_thresh) {
2572  s->invisible = 1;
2573  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2574  goto skip_decode;
2575  }
2576  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2577 
2578  // release no longer referenced frames
2579  for (i = 0; i < 5; i++)
2580  if (s->frames[i].tf.f->data[0] &&
2581  &s->frames[i] != prev_frame &&
2582  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2583  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2584  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2585  vp8_release_frame(s, &s->frames[i]);
2586 
2587  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2588 
2589  if (!s->colorspace)
2590  avctx->colorspace = AVCOL_SPC_BT470BG;
2591  if (s->fullrange)
2592  avctx->color_range = AVCOL_RANGE_JPEG;
2593  else
2594  avctx->color_range = AVCOL_RANGE_MPEG;
2595 
2596  /* Given that arithmetic probabilities are updated every frame, it's quite
2597  * likely that the values we have on a random interframe are complete
2598  * junk if we didn't start decode on a keyframe. So just don't display
2599  * anything rather than junk. */
2600  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2601  !s->framep[VP56_FRAME_GOLDEN] ||
2602  !s->framep[VP56_FRAME_GOLDEN2])) {
2603  av_log(avctx, AV_LOG_WARNING,
2604  "Discarding interframe without a prior keyframe!\n");
2605  ret = AVERROR_INVALIDDATA;
2606  goto err;
2607  }
2608 
2609  curframe->tf.f->key_frame = s->keyframe;
2610  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2612  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
2613  goto err;
2614 
2615  // check if golden and altref are swapped
2616  if (s->update_altref != VP56_FRAME_NONE)
2618  else
2620 
2621  if (s->update_golden != VP56_FRAME_NONE)
2623  else
2625 
2626  if (s->update_last)
2627  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2628  else
2630 
2631  s->next_framep[VP56_FRAME_CURRENT] = curframe;
2632 
2633  if (avctx->codec->update_thread_context)
2634  ff_thread_finish_setup(avctx);
2635 
2636  s->linesize = curframe->tf.f->linesize[0];
2637  s->uvlinesize = curframe->tf.f->linesize[1];
2638 
2639  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2640  /* Zero macroblock structures for top/top-left prediction
2641  * from outside the frame. */
2642  if (!s->mb_layout)
2643  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2644  (s->mb_width + 1) * sizeof(*s->macroblocks));
2645  if (!s->mb_layout && s->keyframe)
2646  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2647 
2648  memset(s->ref_count, 0, sizeof(s->ref_count));
2649 
2650  if (s->mb_layout == 1) {
2651  // Make sure the previous frame has read its segmentation map,
2652  // if we re-use the same map.
2653  if (prev_frame && s->segmentation.enabled &&
2655  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2656  if (is_vp7)
2657  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2658  else
2659  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2660  }
2661 
2662  if (avctx->active_thread_type == FF_THREAD_FRAME)
2663  num_jobs = 1;
2664  else
2665  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
2666  s->num_jobs = num_jobs;
2667  s->curframe = curframe;
2668  s->prev_frame = prev_frame;
2669  s->mv_min.y = -MARGIN;
2670  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2671  for (i = 0; i < MAX_THREADS; i++) {
2672  s->thread_data[i].thread_mb_pos = 0;
2673  s->thread_data[i].wait_mb_pos = INT_MAX;
2674  }
2675  if (is_vp7)
2677  num_jobs);
2678  else
2680  num_jobs);
2681 
2682  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2683  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2684 
2685 skip_decode:
2686  // if future frames don't use the updated probabilities,
2687  // reset them to the values we saved
2688  if (!s->update_probabilities)
2689  s->prob[0] = s->prob[1];
2690 
2691  if (!s->invisible) {
2692  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2693  return ret;
2694  *got_frame = 1;
2695  }
2696 
2697  return avpkt->size;
2698 err:
2699  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2700  return ret;
2701 }
2702 
2703 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2704  AVPacket *avpkt)
2705 {
2706  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2707 }
2708 
2709 #if CONFIG_VP7_DECODER
2710 static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2711  AVPacket *avpkt)
2712 {
2713  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
2714 }
2715 #endif /* CONFIG_VP7_DECODER */
2716 
2718 {
2719  VP8Context *s = avctx->priv_data;
2720  int i;
2721 
2722  if (!s)
2723  return 0;
2724 
2725  vp8_decode_flush_impl(avctx, 1);
2726  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2727  av_frame_free(&s->frames[i].tf.f);
2728 
2729  return 0;
2730 }
2731 
2733 {
2734  int i;
2735  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2736  s->frames[i].tf.f = av_frame_alloc();
2737  if (!s->frames[i].tf.f)
2738  return AVERROR(ENOMEM);
2739  }
2740  return 0;
2741 }
2742 
2743 static av_always_inline
2744 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2745 {
2746  VP8Context *s = avctx->priv_data;
2747  int ret;
2748 
2749  s->avctx = avctx;
2750  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
2751  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2752  avctx->internal->allocate_progress = 1;
2753 
2754  ff_videodsp_init(&s->vdsp, 8);
2755 
2756  ff_vp78dsp_init(&s->vp8dsp);
2757  if (CONFIG_VP7_DECODER && is_vp7) {
2759  ff_vp7dsp_init(&s->vp8dsp);
2762  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2764  ff_vp8dsp_init(&s->vp8dsp);
2767  }
2768 
2769  /* does not change for VP8 */
2770  memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));
2771 
2772  if ((ret = vp8_init_frames(s)) < 0) {
2773  ff_vp8_decode_free(avctx);
2774  return ret;
2775  }
2776 
2777  return 0;
2778 }
2779 
2780 #if CONFIG_VP7_DECODER
2781 static int vp7_decode_init(AVCodecContext *avctx)
2782 {
2783  return vp78_decode_init(avctx, IS_VP7);
2784 }
2785 #endif /* CONFIG_VP7_DECODER */
2786 
2788 {
2789  return vp78_decode_init(avctx, IS_VP8);
2790 }
2791 
2792 #if CONFIG_VP8_DECODER
2793 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2794 {
2795  VP8Context *s = avctx->priv_data;
2796  int ret;
2797 
2798  s->avctx = avctx;
2799 
2800  if ((ret = vp8_init_frames(s)) < 0) {
2801  ff_vp8_decode_free(avctx);
2802  return ret;
2803  }
2804 
2805  return 0;
2806 }
2807 
/* Translate a frame pointer from the source context's frames[] array to
 * the corresponding slot in the destination context's frames[] array
 * (NULL-safe).  Relies on the caller declaring locals named s and s_src. */
2808 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2809 
/**
 * Frame-threading callback: copy the decoder state a worker thread needs
 * from the previously-decoded context (src) into this thread's context
 * (dst) — probabilities, segmentation, loop-filter deltas, sign biases
 * and references to all frame buffers.
 *
 * @return 0 on success, a negative AVERROR if re-referencing a frame fails.
 */
2810 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2811  const AVCodecContext *src)
2812 {
2813  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2814  int i;
2815 
     /* If the stream dimensions changed, drop our per-size buffers; they
      * are re-allocated lazily at the new size. */
2816  if (s->macroblocks_base &&
2817  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2818  free_buffers(s);
2819  s->mb_width = s_src->mb_width;
2820  s->mb_height = s_src->mb_height;
2821  }
2822 
     /* Take the probability set the source ended up with: prob[0] if the
      * source kept its updates, otherwise the saved pre-frame copy. */
2823  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2824  s->segmentation = s_src->segmentation;
2825  s->lf_delta = s_src->lf_delta;
2826  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2827 
     /* Re-reference every allocated frame buffer into our own pool. */
2828  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2829  if (s_src->frames[i].tf.f->data[0]) {
2830  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2831  if (ret < 0)
2832  return ret;
2833  }
2834  }
2835 
     /* Rebase the source's next_framep pointers into our frames[] array
      * (REBASE uses the locals s and s_src declared above). */
2836  s->framep[0] = REBASE(s_src->next_framep[0]);
2837  s->framep[1] = REBASE(s_src->next_framep[1]);
2838  s->framep[2] = REBASE(s_src->next_framep[2]);
2839  s->framep[3] = REBASE(s_src->next_framep[3]);
2840 
2841  return 0;
2842 }
2843 #endif /* CONFIG_VP8_DECODER */
2844 
2845 #if CONFIG_VP7_DECODER
2846 AVCodec ff_vp7_decoder = {
2847  .name = "vp7",
2848  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2849  .type = AVMEDIA_TYPE_VIDEO,
2850  .id = AV_CODEC_ID_VP7,
2851  .priv_data_size = sizeof(VP8Context),
2852  .init = vp7_decode_init,
2853  .close = ff_vp8_decode_free,
2854  .decode = vp7_decode_frame,
2855  .capabilities = AV_CODEC_CAP_DR1,
2857 };
2858 #endif /* CONFIG_VP7_DECODER */
2859 
2860 #if CONFIG_VP8_DECODER
2861 AVCodec ff_vp8_decoder = {
2862  .name = "vp8",
2863  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2864  .type = AVMEDIA_TYPE_VIDEO,
2865  .id = AV_CODEC_ID_VP8,
2866  .priv_data_size = sizeof(VP8Context),
2868  .close = ff_vp8_decode_free,
2870  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
2873  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2874  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2875 };
2876 #endif /* CONFIG_VP7_DECODER */
uint8_t golden
Definition: vp8.h:242
uint8_t inner_limit
Definition: vp8.h:82
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:185
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:719
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1596
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:786
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1521
discard all frames except keyframes
Definition: avcodec.h:688
Definition: vp9.h:47
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:631
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:752
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:94
(only used in prediction) no split MVs
Definition: vp8.h:77
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:235
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static void flush(AVCodecContext *avctx)
static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2420
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:163
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1706
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1321
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:390
uint8_t feature_value[4][4]
Definition: vp8.h:308
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:216
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:527
#define avpriv_request_sample(...)
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:187
uint8_t mbskip_enabled
Definition: vp8.h:158
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:351
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2247
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1343
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:245
uint8_t scan[16]
Definition: vp8.h:247
int linesize
Definition: vp8.h:153
int size
Definition: avcodec.h:1434
const char * b
Definition: vf_curves.c:109
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:119
#define MARGIN
Definition: vp8.c:2200
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1732
VP56mv bmv[16]
Definition: vp8.h:98
#define AV_RL16
Definition: intreadwrite.h:42
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
uint8_t inner_filter
Definition: vp8.h:83
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
uint8_t segmentid[3]
Definition: vp8.h:238
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:811
discard all
Definition: avcodec.h:689
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:426
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3482
#define CONFIG_VP7_DECODER
Definition: config.h:824
uint8_t sharpness
Definition: vp8.h:182
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
2 16x8 blocks (vertical)
Definition: vp8.h:73
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:124
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:260
VP8Frame * framep[4]
Definition: vp8.h:146
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1355
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2485
#define VP7_MVC_SIZE
Definition: vp8.c:392
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:815
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: width>>3, height is assumed equal to width second dimension: 0 if no vertical interp...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:826
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1380
uint8_t(* top_nnz)[9]
Definition: vp8.h:227
int num_jobs
Definition: vp8.h:277
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3236
#define AV_RN32A(p)
Definition: intreadwrite.h:526
uint8_t pred16x16[4]
Definition: vp8.h:243
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:174
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:152
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int16_t y
Definition: vp56.h:67
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:253
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2426
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:96
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1482
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:107
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:203
#define av_cold
Definition: attributes.h:74
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:293
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:944
mode
Definition: f_perms.c:27
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:124
uint8_t ref_frame
Definition: vp8.h:91
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1560
Multithreading support functions.
Definition: vp9.h:46
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2703
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:366
uint8_t mvc[2][19]
Definition: vp8.h:246
VP56mv mv
Definition: vp8.h:97
int8_t base_quant[4]
Definition: vp8.h:175
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:741
static AVFrame * frame
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:252
uint8_t * data
Definition: avcodec.h:1433
int8_t yoffset
Definition: vp8data.h:62
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:3784
static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
vp5689 returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vp56.h:233
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:85
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:214
ptrdiff_t size
Definition: opengl_enc.c:101
VP8Frame * prev_frame
Definition: vp8.h:149
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:266
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
static void fade(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:437
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:271
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_COPY64(d, s)
Definition: intreadwrite.h:590
struct VP8Context::@100 segmentation
Base parameters for segmentation, i.e.
uint8_t feature_index_prob[4][3]
Definition: vp8.h:307
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:95
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2544
unsigned m
Definition: audioconvert.c:187
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:188
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:666
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:274
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:938
enum AVCodecID id
Definition: avcodec.h:3496
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:99
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:270
Definition: vp8.h:132
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1879
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:173
uint16_t mb_width
Definition: vp8.h:151
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:763
int profile
Definition: mxfenc.c:1820
#define FF_SIGNBIT(x)
Definition: internal.h:64
uint8_t last
Definition: vp8.h:241
static const int sizes[][2]
Definition: img2dec.c:48
#define AVERROR(e)
Definition: error.h:43
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:646
uint8_t mode
Definition: vp8.h:90
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1525
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2536
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:178
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3062
const char * r
Definition: vf_curves.c:107
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:145
#define CONFIG_VP8_DECODER
Definition: config.h:825
#define EDGE_EMU_LINESIZE
Definition: vp8.h:127
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:300
VideoDSPContext vdsp
Definition: vp8.h:268
const char * name
Name of the codec implementation.
Definition: avcodec.h:3489
VP8Macroblock * macroblocks_base
Definition: vp8.h:250
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1841
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:128
int16_t block[6][4][16]
Definition: vp8.h:102
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1261
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2491
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:286
#define FFMAX(a, b)
Definition: common.h:90
Libavcodec external API header.
uint8_t keyframe
Definition: vp8.h:156
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:920
int x
Definition: vp8.h:138
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:217
VP56Frame
Definition: vp56.h:39
int(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:285
int16_t luma_qmul[2]
Definition: vp8.h:197
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:68
useful rectangle filling function
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
#define MAX_THREADS
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
4x4 blocks of 4x4px each
Definition: vp8.h:76
uint8_t deblock_filter
Definition: vp8.h:157
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:92
float y
uint8_t feature_present_prob[4]
Definition: vp8.h:306
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1791
struct VP8Context::@104 prob[2]
These are all of the updatable probabilities for binary decisions.
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:275
int16_t block_dc[16]
Definition: vp8.h:103
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:346
int width
picture width / height.
Definition: avcodec.h:1691
uint8_t mbskip
Definition: vp8.h:239
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:223
static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2414
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:286
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2732
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:48
int32_t
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2282
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:87
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:820
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:198
int16_t chroma_qmul[2]
Definition: vp8.h:199
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
#define AV_RL32
Definition: intreadwrite.h:146
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:226
float u
int n
Definition: avisynth_c.h:547
ThreadFrame tf
Definition: vp8.h:133
static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2286
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2048
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:779
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:774
#define vp56_rac_get_prob
Definition: vp56.h:264
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:106
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1395
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2240
uint8_t segment
Definition: vp8.h:94
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3043
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:544
struct VP8Context::@101 filter
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2530
#define IS_VP8
Definition: vp8dsp.h:104
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:924
static const int8_t mv[256][2]
Definition: 4xm.c:77
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2234
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1534
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:281
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
#define src1
Definition: h264pred.c:139
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1902
VP8Frame * curframe
Definition: vp8.h:148
uint8_t simple
Definition: vp8.h:180
AVS_Value src
Definition: avisynth_c.h:482
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
VP8Frame frames[5]
Definition: vp8.h:272
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3055
uint8_t level
Definition: vp8.h:181
static const uint8_t zigzag_scan[16+1]
Definition: h264data.h:54
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:81
AVBufferRef * seg_map
Definition: vp8.h:134
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:798
main external API structure.
Definition: avcodec.h:1512
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:451
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:147
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:283
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:118
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:229
void * buf
Definition: avisynth_c.h:553
int y
Definition: vp8.h:139
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:267
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
int vp7
Definition: vp8.h:288
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3054
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:259
int coded_height
Definition: avcodec.h:1706
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:208
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:129
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2240
VP8intmv mv_min
Definition: vp8.h:160
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:776
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1516
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:376
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:172
struct VP8Context::@102 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:1986
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:395
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:787
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1709
int uvlinesize
Definition: vp8.h:154
static void update_refs(VP8Context *s)
Definition: vp8.c:415
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:403
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:115
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:730
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1135
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:334
#define DC_127_PRED8x8
Definition: h264pred.h:85
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
Definition: vp56.h:65
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2787
#define AV_RL24
Definition: intreadwrite.h:78
int update_altref
Definition: vp8.h:254
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
VP8intmv mv_max
Definition: vp8.h:161
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:305
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:214
2 8x16 blocks (horizontal)
Definition: vp8.h:74
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2717
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: vp8.c:1470
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
uint8_t pred8x8c[3]
Definition: vp8.h:244
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:543
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:521
discard all non reference
Definition: avcodec.h:685
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2202
uint8_t partitioning
Definition: vp8.h:92
#define AV_ZERO64(d)
Definition: intreadwrite.h:618
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:67
static av_always_inline void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1170
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
int16_t x
Definition: vp56.h:66
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:305
if(ret< 0)
Definition: vf_mcdeint.c:280
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:112
#define AV_COPY128(d, s)
Definition: intreadwrite.h:594
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
Definition: vp3.c:1927
int wait_mb_pos
Definition: vp8.h:125
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:68
uint8_t chroma_pred_mode
Definition: vp8.h:93
static double c[64]
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:115
#define DC_129_PRED8x8
Definition: h264pred.h:86
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:3222
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:318
int invisible
Definition: vp8.h:251
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:843
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:164
void * priv_data
Definition: avcodec.h:1554
int(* update_thread_context)(AVCodecContext *dst, const AVCodecContext *src)
Copy necessary context variables from a previous thread context to the current one.
Definition: avcodec.h:3537
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1550
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
#define MODE_I4x4
Definition: vp8.h:64
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:925
#define XCHG(a, b, xchg)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:3103
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2283
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1562
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:269
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:149
int thread_nr
Definition: vp8.h:119
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2498
static const double coeff[2][5]
Definition: vf_owdenoise.c:71
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
uint64_t layout
AVDiscard
Definition: avcodec.h:680
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:368
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
#define av_uninit(x)
Definition: attributes.h:141
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1733
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2081
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:103
static int init_thread_copy(AVCodecContext *avctx)
Definition: alac.c:646
#define av_always_inline
Definition: attributes.h:37
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:176
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:95
uint8_t intra
Definition: vp8.h:240
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1035
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:111
uint8_t skip
Definition: vp8.h:87
void ff_vp8dsp_init(VP8DSPContext *c)
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:367
This structure stores compressed data.
Definition: avcodec.h:1410
#define VP8_MVC_SIZE
Definition: vp8.c:393
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:490
uint8_t profile
Definition: vp8.h:159
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1216
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
struct VP8Context::@103 lf_delta
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:252
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:857
VP8ThreadData * thread_data
Definition: vp8.h:144
Predicted.
Definition: avutil.h:267
int thread_mb_pos
Definition: vp8.h:124
2x2 blocks of 8x8px each
Definition: vp8.h:75
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2168
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:824
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2744
#define AV_WN64(p, v)
Definition: intreadwrite.h:380
uint8_t filter_level
Definition: vp8.h:81
static int width
static int16_t block[64]
Definition: dct-test.c:110