FFmpeg  3.4.9
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "mathops.h"
32 #include "rectangle.h"
33 #include "thread.h"
34 #include "vp8.h"
35 #include "vp8data.h"
36 
37 #if ARCH_ARM
38 # include "arm/vp8.h"
39 #endif
40 
41 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
42 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
43 #elif CONFIG_VP7_DECODER
44 #define VPX(vp7, f) vp7_ ## f
45 #else // CONFIG_VP8_DECODER
46 #define VPX(vp7, f) vp8_ ## f
47 #endif
48 
/* Free all per-context heap buffers (per-thread scratch data and the
 * per-row top_nnz / top_border arrays) and reset the derived pointer.
 * NOTE(review): this extracted copy is missing several original lines
 * (doxygen lines 56/58/61-62 — presumably the per-thread mutex destroy,
 * filter_strength frees and other av_freep calls); confirm against the
 * full FFmpeg 3.4 vp8.c before relying on this body. */
49 static void free_buffers(VP8Context *s)
50 {
51  int i;
52  if (s->thread_data)
53  for (i = 0; i < MAX_THREADS; i++) {
54 #if HAVE_THREADS
55  pthread_cond_destroy(&s->thread_data[i].cond);
57 #endif
59  }
60  av_freep(&s->thread_data);
63  av_freep(&s->top_nnz);
64  av_freep(&s->top_border);
65 
66  s->macroblocks = NULL;
67 }
68 
69 static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
70 {
71  int ret;
72  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
73  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
74  return ret;
75  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
77  return AVERROR(ENOMEM);
78  }
79  return 0;
80 }
81 
83 {
86 }
87 
#if CONFIG_VP8_DECODER
/* Make dst an additional reference to src: drop whatever dst currently
 * holds, then duplicate both the picture reference and the segmentation
 * map.  Returns 0 on success or a negative AVERROR code. */
static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
{
    int err;

    vp8_release_frame(s, dst);

    err = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (err < 0)
        return err;
    if (src->seg_map) {
        dst->seg_map = av_buffer_ref(src->seg_map);
        if (!dst->seg_map) {
            /* Undo the picture reference taken above. */
            vp8_release_frame(s, dst);
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}
#endif /* CONFIG_VP8_DECODER */
106 
107 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
108 {
109  VP8Context *s = avctx->priv_data;
110  int i;
111 
112  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
113  vp8_release_frame(s, &s->frames[i]);
114  memset(s->framep, 0, sizeof(s->framep));
115 
116  if (free_mem)
117  free_buffers(s);
118 }
119 
120 static void vp8_decode_flush(AVCodecContext *avctx)
121 {
122  vp8_decode_flush_impl(avctx, 0);
123 }
124 
/* Pick a frame slot that is not currently referenced as CURRENT, PREVIOUS,
 * GOLDEN or GOLDEN2, releasing any stale picture it still holds; aborts if
 * all 5 slots are in use (indicates a reference-management bug).
 * NOTE(review): the function signature (doxygen line 125) is missing from
 * this extracted copy — presumably
 * `static VP8Frame *vp8_find_free_buffer(VP8Context *s)`; confirm. */
126 {
127  VP8Frame *frame = NULL;
128  int i;
129 
130  // find a free buffer
131  for (i = 0; i < 5; i++)
132  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
133  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
134  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
135  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
136  frame = &s->frames[i];
137  break;
138  }
139  if (i == 5) {
140  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
141  abort();
142  }
143  if (frame->tf.f->data[0])
144  vp8_release_frame(s, frame);
145 
146  return frame;
147 }
148 
/* (Re)configure decoder dimensions and allocate per-frame scratch arrays.
 * Chooses between the frame-threading macroblock layout (one row of MBs)
 * and the sliced-threading layout (full MB array), then allocates
 * top_nnz/top_border and per-thread filter-strength buffers.
 * NOTE(review): this extracted copy is missing doxygen lines 157, 172,
 * 178 and 187 (including the thread_data allocation and the
 * intra4x4_pred_mode_top allocation referenced by the checks below);
 * confirm against the full FFmpeg 3.4 vp8.c. */
149 static av_always_inline
150 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
151 {
152  AVCodecContext *avctx = s->avctx;
153  int i, ret;
154 
155  if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
156  height != s->avctx->height) {
158 
159  ret = ff_set_dimensions(s->avctx, width, height);
160  if (ret < 0)
161  return ret;
162  }
163 
164  s->mb_width = (s->avctx->coded_width + 15) / 16;
165  s->mb_height = (s->avctx->coded_height + 15) / 16;
166 
167  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
168  avctx->thread_count > 1;
169  if (!s->mb_layout) { // Frame threading and one thread
170  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
171  sizeof(*s->macroblocks));
173  } else // Sliced threading
174  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
175  sizeof(*s->macroblocks));
176  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
177  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
179 
180  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
181  !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
182  free_buffers(s);
183  return AVERROR(ENOMEM);
184  }
185 
186  for (i = 0; i < MAX_THREADS; i++) {
188  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
189  if (!s->thread_data[i].filter_strength) {
190  free_buffers(s);
191  return AVERROR(ENOMEM);
192  }
193 #if HAVE_THREADS
194  pthread_mutex_init(&s->thread_data[i].lock, NULL);
195  pthread_cond_init(&s->thread_data[i].cond, NULL);
196 #endif
197  }
198 
199  s->macroblocks = s->macroblocks_base + 1;
200 
201  return 0;
202 }
203 
/* Thin wrappers selecting the codec flavour for update_dimensions().
 * NOTE(review): the signatures (doxygen lines 204 and 209) are missing
 * from this extracted copy — presumably
 * `static int vp7_update_dimensions(VP8Context *s, int width, int height)`
 * and the vp8_ equivalent; confirm. */
205 {
206  return update_dimensions(s, width, height, IS_VP7);
207 }
208 
210 {
211  return update_dimensions(s, width, height, IS_VP8);
212 }
213 
214 
/* Parse the segmentation header: feature data (quant / loop-filter values
 * per segment) and, when update_map is set, the three segment-id tree
 * probabilities (255 = default when the update bit is absent).
 * NOTE(review): this extracted copy is missing the signature (doxygen
 * line 215, presumably `static void parse_segment_info(VP8Context *s)`)
 * and body lines 220/223/226/229 (the update_map/absolute_vals reads and
 * the per-segment value reads); confirm against the full source. */
216 {
217  VP56RangeCoder *c = &s->c;
218  int i;
219 
221 
222  if (vp8_rac_get(c)) { // update segment feature data
224 
225  for (i = 0; i < 4; i++)
227 
228  for (i = 0; i < 4; i++)
230  }
231  if (s->segmentation.update_map)
232  for (i = 0; i < 3; i++)
233  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
234 }
235 
/* Read the loop-filter delta updates: a 6-bit magnitude plus sign bit for
 * each of the 4 reference-frame deltas and each mode delta.
 * NOTE(review): the signature (doxygen line 236, presumably
 * `static void update_lf_deltas(VP8Context *s)`) is missing from this
 * extracted copy; confirm. */
237 {
238  VP56RangeCoder *c = &s->c;
239  int i;
240 
241  for (i = 0; i < 4; i++) {
242  if (vp8_rac_get(c)) {
243  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
244 
245  if (vp8_rac_get(c))
246  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
247  }
248  }
249 
250  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
251  if (vp8_rac_get(c)) {
252  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
253 
254  if (vp8_rac_get(c))
255  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
256  }
257  }
258 }
259 
260 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
261 {
262  const uint8_t *sizes = buf;
263  int i;
264  int ret;
265 
266  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
267 
268  buf += 3 * (s->num_coeff_partitions - 1);
269  buf_size -= 3 * (s->num_coeff_partitions - 1);
270  if (buf_size < 0)
271  return -1;
272 
273  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
274  int size = AV_RL24(sizes + 3 * i);
275  if (buf_size - size < 0)
276  return -1;
277 
278  ret = ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
279  if (ret < 0)
280  return ret;
281  buf += size;
282  buf_size -= size;
283  }
284  return ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
285 }
286 
287 static void vp7_get_quants(VP8Context *s)
288 {
289  VP56RangeCoder *c = &s->c;
290 
291  int yac_qi = vp8_rac_get_uint(c, 7);
292  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
293  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
294  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
295  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
296  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
297 
298  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
299  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
300  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
301  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
302  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
303  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
304 }
305 
306 static void vp8_get_quants(VP8Context *s)
307 {
308  VP56RangeCoder *c = &s->c;
309  int i, base_qi;
310 
311  int yac_qi = vp8_rac_get_uint(c, 7);
312  int ydc_delta = vp8_rac_get_sint(c, 4);
313  int y2dc_delta = vp8_rac_get_sint(c, 4);
314  int y2ac_delta = vp8_rac_get_sint(c, 4);
315  int uvdc_delta = vp8_rac_get_sint(c, 4);
316  int uvac_delta = vp8_rac_get_sint(c, 4);
317 
318  for (i = 0; i < 4; i++) {
319  if (s->segmentation.enabled) {
320  base_qi = s->segmentation.base_quant[i];
321  if (!s->segmentation.absolute_vals)
322  base_qi += yac_qi;
323  } else
324  base_qi = yac_qi;
325 
326  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
327  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
328  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
329  /* 101581>>16 is equivalent to 155/100 */
330  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
331  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
332  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
333 
334  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
335  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
336  }
337 }
338 
339 /**
340  * Determine which buffers golden and altref should be updated with after this frame.
341  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
342  *
343  * Intra frames update all 3 references
344  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
345  * If the update (golden|altref) flag is set, it's updated with the current frame
346  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
347  * If the flag is not set, the number read means:
348  * 0: no update
349  * 1: VP56_FRAME_PREVIOUS
350  * 2: update golden with altref, or update altref with golden
351  */
/* Translate an update flag (plus a 2-bit copy code when the flag is unset)
 * into the reference slot the golden/altref frame should be updated from;
 * see the comment block above for the semantics.
 * NOTE(review): the signature (doxygen line 352) and the `case 2` body
 * (doxygen line 363, the golden<->altref copy) are missing from this
 * extracted copy; confirm against the full source. */
353 {
354  VP56RangeCoder *c = &s->c;
355 
356  if (update)
357  return VP56_FRAME_CURRENT;
358 
359  switch (vp8_rac_get_uint(c, 2)) {
360  case 1:
361  return VP56_FRAME_PREVIOUS;
362  case 2:
364  }
365  return VP56_FRAME_NONE;
366 }
367 
/* Reset the DCT token probabilities to the spec defaults, expanding the
 * per-band defaults across the 16 coefficient positions.
 * NOTE(review): the signature (doxygen line 368, presumably
 * `static void vp78_reset_probability_tables(VP8Context *s)`) is missing
 * from this extracted copy; confirm. */
369 {
370  int i, j;
371  for (i = 0; i < 4; i++)
372  for (j = 0; j < 16; j++)
373  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
374  sizeof(s->prob->token[i][j]));
375 }
376 
378 {
379  VP56RangeCoder *c = &s->c;
380  int i, j, k, l, m;
381 
382  for (i = 0; i < 4; i++)
383  for (j = 0; j < 8; j++)
384  for (k = 0; k < 3; k++)
385  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
387  int prob = vp8_rac_get_uint(c, 8);
388  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
389  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
390  }
391 }
392 
393 #define VP7_MVC_SIZE 17
394 #define VP8_MVC_SIZE 19
395 
/* Update the intra 16x16 / chroma prediction probabilities and the MV
 * component probabilities (17.2); mvc_size is VP7_MVC_SIZE or VP8_MVC_SIZE.
 * NOTE(review): the first signature line (doxygen line 396) and the
 * per-entry update condition for mvc (doxygen line 412, presumably a
 * vp56_rac_get_prob_branchy() check) are missing from this extracted
 * copy; confirm against the full source. */
397  int mvc_size)
398 {
399  VP56RangeCoder *c = &s->c;
400  int i, j;
401 
402  if (vp8_rac_get(c))
403  for (i = 0; i < 4; i++)
404  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
405  if (vp8_rac_get(c))
406  for (i = 0; i < 3; i++)
407  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
408 
409  // 17.2 MV probability update
410  for (i = 0; i < 2; i++)
411  for (j = 0; j < mvc_size; j++)
413  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
414 }
415 
416 static void update_refs(VP8Context *s)
417 {
418  VP56RangeCoder *c = &s->c;
419 
420  int update_golden = vp8_rac_get(c);
421  int update_altref = vp8_rac_get(c);
422 
423  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
424  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
425 }
426 
427 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
428 {
429  int i, j;
430 
431  for (j = 1; j < 3; j++) {
432  for (i = 0; i < height / 2; i++)
433  memcpy(dst->data[j] + i * dst->linesize[j],
434  src->data[j] + i * src->linesize[j], width / 2);
435  }
436 }
437 
438 static void fade(uint8_t *dst, ptrdiff_t dst_linesize,
439  const uint8_t *src, ptrdiff_t src_linesize,
440  int width, int height,
441  int alpha, int beta)
442 {
443  int i, j;
444  for (j = 0; j < height; j++) {
445  for (i = 0; i < width; i++) {
446  uint8_t y = src[j * src_linesize + i];
447  dst[j * dst_linesize + i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
448  }
449  }
450 }
451 
/* Read the VP7 fade parameters (signed 8-bit alpha/beta) and, for
 * interframes with a non-trivial fade, produce a new PREVIOUS frame with
 * faded luma (and copied chroma) so the golden frame is preserved.
 * NOTE(review): the signature (doxygen line 452, presumably
 * `static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)`) and
 * body lines 473-474 (presumably assigning a fresh buffer to
 * s->framep[VP56_FRAME_PREVIOUS] before the alloc below) are missing
 * from this extracted copy; confirm against the full source. */
453 {
454  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
455  int beta = (int8_t) vp8_rac_get_uint(c, 8);
456  int ret;
457 
458  if (!s->keyframe && (alpha || beta)) {
459  int width = s->mb_width * 16;
460  int height = s->mb_height * 16;
461  AVFrame *src, *dst;
462 
463  if (!s->framep[VP56_FRAME_PREVIOUS] ||
464  !s->framep[VP56_FRAME_GOLDEN]) {
465  av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
466  return AVERROR_INVALIDDATA;
467  }
468 
469  dst =
470  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
471 
472  /* preserve the golden frame, write a new previous frame */
475  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
476  return ret;
477 
478  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
479 
480  copy_chroma(dst, src, width, height);
481  }
482 
483  fade(dst->data[0], dst->linesize[0],
484  src->data[0], src->linesize[0],
485  width, height, alpha, beta);
486  }
487 
488  return 0;
489 }
490 
/* Parse the complete VP7 frame header: profile/keyframe bits, dimensions,
 * macroblock features, quantisers, reference updates, fading, loop-filter
 * parameters, scan-order updates and token probabilities.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): this extracted copy is missing many doxygen lines
 * (537-539, 541, 558, 567, 592-593, 601, 633, 641 — default-probability
 * memcpy heads, a feature-quant read, per-feature value reads, the golden
 * update handling, the probability-update flag read, the token-probability
 * update call and the sign-bias read); confirm against the full source. */
491 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
492 {
493  VP56RangeCoder *c = &s->c;
494  int part1_size, hscale, vscale, i, j, ret;
495  int width = s->avctx->width;
496  int height = s->avctx->height;
497 
498  if (buf_size < 4) {
499  return AVERROR_INVALIDDATA;
500  }
501 
502  s->profile = (buf[0] >> 1) & 7;
503  if (s->profile > 1) {
504  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
505  return AVERROR_INVALIDDATA;
506  }
507 
508  s->keyframe = !(buf[0] & 1);
509  s->invisible = 0;
510  part1_size = AV_RL24(buf) >> 4;
511 
512  if (buf_size < 4 - s->profile + part1_size) {
513  av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
514  return AVERROR_INVALIDDATA;
515  }
516 
517  buf += 4 - s->profile;
518  buf_size -= 4 - s->profile;
519 
520  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
521 
522  ret = ff_vp56_init_range_decoder(c, buf, part1_size);
523  if (ret < 0)
524  return ret;
525  buf += part1_size;
526  buf_size -= part1_size;
527 
528  /* A. Dimension information (keyframes only) */
529  if (s->keyframe) {
530  width = vp8_rac_get_uint(c, 12);
531  height = vp8_rac_get_uint(c, 12);
532  hscale = vp8_rac_get_uint(c, 2);
533  vscale = vp8_rac_get_uint(c, 2);
534  if (hscale || vscale)
535  avpriv_request_sample(s->avctx, "Upscaling");
536 
540  sizeof(s->prob->pred16x16));
542  sizeof(s->prob->pred8x8c));
543  for (i = 0; i < 2; i++)
544  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
545  sizeof(vp7_mv_default_prob[i]));
546  memset(&s->segmentation, 0, sizeof(s->segmentation));
547  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
548  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
549  }
550 
551  if (s->keyframe || s->profile > 0)
552  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
553 
554  /* B. Decoding information for all four macroblock-level features */
555  for (i = 0; i < 4; i++) {
556  s->feature_enabled[i] = vp8_rac_get(c);
557  if (s->feature_enabled[i]) {
559 
560  for (j = 0; j < 3; j++)
561  s->feature_index_prob[i][j] =
562  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
563 
564  if (vp7_feature_value_size[s->profile][i])
565  for (j = 0; j < 4; j++)
566  s->feature_value[i][j] =
568  }
569  }
570 
571  s->segmentation.enabled = 0;
572  s->segmentation.update_map = 0;
573  s->lf_delta.enabled = 0;
574 
575  s->num_coeff_partitions = 1;
576  ret = ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
577  if (ret < 0)
578  return ret;
579 
580  if (!s->macroblocks_base || /* first frame */
581  width != s->avctx->width || height != s->avctx->height ||
582  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
583  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
584  return ret;
585  }
586 
587  /* C. Dequantization indices */
588  vp7_get_quants(s);
589 
590  /* D. Golden frame update flag (a Flag) for interframes only */
591  if (!s->keyframe) {
594  }
595 
596  s->update_last = 1;
597  s->update_probabilities = 1;
598  s->fade_present = 1;
599 
600  if (s->profile > 0) {
602  if (!s->update_probabilities)
603  s->prob[1] = s->prob[0];
604 
605  if (!s->keyframe)
606  s->fade_present = vp8_rac_get(c);
607  }
608 
609  if (vpX_rac_is_end(c))
610  return AVERROR_INVALIDDATA;
611  /* E. Fading information for previous frame */
612  if (s->fade_present && vp8_rac_get(c)) {
613  if ((ret = vp7_fade_frame(s ,c)) < 0)
614  return ret;
615  }
616 
617  /* F. Loop filter type */
618  if (!s->profile)
619  s->filter.simple = vp8_rac_get(c);
620 
621  /* G. DCT coefficient ordering specification */
622  if (vp8_rac_get(c))
623  for (i = 1; i < 16; i++)
624  s->prob[0].scan[i] = ff_zigzag_scan[vp8_rac_get_uint(c, 4)];
625 
626  /* H. Loop filter levels */
627  if (s->profile > 0)
628  s->filter.simple = vp8_rac_get(c);
629  s->filter.level = vp8_rac_get_uint(c, 6);
630  s->filter.sharpness = vp8_rac_get_uint(c, 3);
631 
632  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
634 
635  s->mbskip_enabled = 0;
636 
637  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
638  if (!s->keyframe) {
639  s->prob->intra = vp8_rac_get_uint(c, 8);
640  s->prob->last = vp8_rac_get_uint(c, 8);
642  }
643 
644  return 0;
645 }
646 
/* Parse the complete VP8 frame header (RFC 6386 section 9): uncompressed
 * 3-byte header, keyframe start code and dimensions, then the compressed
 * header with segmentation, loop-filter, partitions, quantisers,
 * reference updates and probability updates.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): this extracted copy is missing several doxygen lines
 * (670, 673, 683, 699, 701, 723, 750, 761, 770 — the put_pixels_tab
 * memcpy heads, an av_log head, default-probability memcpy heads, the
 * parse_segment_info() call, the golden sign-bias read, the token
 * probability update call and the golden-probability read guard);
 * confirm against the full source. */
647 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
648 {
649  VP56RangeCoder *c = &s->c;
650  int header_size, hscale, vscale, ret;
651  int width = s->avctx->width;
652  int height = s->avctx->height;
653 
654  if (buf_size < 3) {
655  av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
656  return AVERROR_INVALIDDATA;
657  }
658 
659  s->keyframe = !(buf[0] & 1);
660  s->profile = (buf[0]>>1) & 7;
661  s->invisible = !(buf[0] & 0x10);
662  header_size = AV_RL24(buf) >> 5;
663  buf += 3;
664  buf_size -= 3;
665 
666  if (s->profile > 3)
667  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
668 
669  if (!s->profile)
671  sizeof(s->put_pixels_tab));
672  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
674  sizeof(s->put_pixels_tab));
675 
676  if (header_size > buf_size - 7 * s->keyframe) {
677  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
678  return AVERROR_INVALIDDATA;
679  }
680 
681  if (s->keyframe) {
682  if (AV_RL24(buf) != 0x2a019d) {
684  "Invalid start code 0x%x\n", AV_RL24(buf));
685  return AVERROR_INVALIDDATA;
686  }
687  width = AV_RL16(buf + 3) & 0x3fff;
688  height = AV_RL16(buf + 5) & 0x3fff;
689  hscale = buf[4] >> 6;
690  vscale = buf[6] >> 6;
691  buf += 7;
692  buf_size -= 7;
693 
694  if (hscale || vscale)
695  avpriv_request_sample(s->avctx, "Upscaling");
696 
700  sizeof(s->prob->pred16x16));
702  sizeof(s->prob->pred8x8c));
703  memcpy(s->prob->mvc, vp8_mv_default_prob,
704  sizeof(s->prob->mvc));
705  memset(&s->segmentation, 0, sizeof(s->segmentation));
706  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
707  }
708 
709  ret = ff_vp56_init_range_decoder(c, buf, header_size);
710  if (ret < 0)
711  return ret;
712  buf += header_size;
713  buf_size -= header_size;
714 
715  if (s->keyframe) {
716  s->colorspace = vp8_rac_get(c);
717  if (s->colorspace)
718  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
719  s->fullrange = vp8_rac_get(c);
720  }
721 
722  if ((s->segmentation.enabled = vp8_rac_get(c)))
724  else
725  s->segmentation.update_map = 0; // FIXME: move this to some init function?
726 
727  s->filter.simple = vp8_rac_get(c);
728  s->filter.level = vp8_rac_get_uint(c, 6);
729  s->filter.sharpness = vp8_rac_get_uint(c, 3);
730 
731  if ((s->lf_delta.enabled = vp8_rac_get(c)))
732  if (vp8_rac_get(c))
733  update_lf_deltas(s);
734 
735  if (setup_partitions(s, buf, buf_size)) {
736  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
737  return AVERROR_INVALIDDATA;
738  }
739 
740  if (!s->macroblocks_base || /* first frame */
741  width != s->avctx->width || height != s->avctx->height ||
742  (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
743  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
744  return ret;
745 
746  vp8_get_quants(s);
747 
748  if (!s->keyframe) {
749  update_refs(s);
751  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
752  }
753 
754  // if we aren't saving this frame's probabilities for future frames,
755  // make a copy of the current probabilities
756  if (!(s->update_probabilities = vp8_rac_get(c)))
757  s->prob[1] = s->prob[0];
758 
759  s->update_last = s->keyframe || vp8_rac_get(c);
760 
762 
763  if ((s->mbskip_enabled = vp8_rac_get(c)))
764  s->prob->mbskip = vp8_rac_get_uint(c, 8);
765 
766  if (!s->keyframe) {
767  s->prob->intra = vp8_rac_get_uint(c, 8);
768  s->prob->last = vp8_rac_get_uint(c, 8);
769  s->prob->golden = vp8_rac_get_uint(c, 8);
771  }
772 
773  return 0;
774 }
775 
776 static av_always_inline
777 void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
778 {
779  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
780  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
781  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
782  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
783 }
784 
785 /**
786  * Motion vector coding, 17.1.
787  */
/* Decode one motion-vector component (17.1): either a "large" magnitude
 * read bit-by-bit (VP7 uses 8 magnitude bits, VP8 uses 10) or a value
 * 0..7 from the small-MV tree, followed by a sign bit for nonzero values.
 * NOTE(review): the signature (doxygen line 788, presumably
 * `static int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)`)
 * is missing from this extracted copy; confirm. */
789 {
790  int bit, x = 0;
791 
792  if (vp56_rac_get_prob_branchy(c, p[0])) {
793  int i;
794 
795  for (i = 0; i < 3; i++)
796  x += vp56_rac_get_prob(c, p[9 + i]) << i;
797  for (i = (vp7 ? 7 : 9); i > 3; i--)
798  x += vp56_rac_get_prob(c, p[9 + i]) << i;
799  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
800  x += 8;
801  } else {
802  // small_mvtree
803  const uint8_t *ps = p + 2;
804  bit = vp56_rac_get_prob(c, *ps);
805  ps += 1 + 3 * bit;
806  x += 4 * bit;
807  bit = vp56_rac_get_prob(c, *ps);
808  ps += 1 + bit;
809  x += 2 * bit;
810  x += vp56_rac_get_prob(c, *ps);
811  }
812 
813  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
814 }
815 
/* Thin wrappers selecting the codec flavour for read_mv_component().
 * NOTE(review): the signatures (doxygen lines ~816 and ~821, presumably
 * `static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)`
 * and the vp8_ equivalent) are missing from this extracted copy; confirm. */
817 {
818  return read_mv_component(c, p, 1);
819 }
820 
822 {
823  return read_mv_component(c, p, 0);
824 }
825 
826 static av_always_inline
827 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
828 {
829  if (is_vp7)
830  return vp7_submv_prob;
831 
832  if (left == top)
833  return vp8_submv_prob[4 - !!left];
834  if (!top)
835  return vp8_submv_prob[2];
836  return vp8_submv_prob[1 - !!left];
837 }
838 
839 /**
840  * Split motion vector prediction, 16.4.
841  * @returns the number of motion vectors parsed (2, 4 or 16)
842  */
/* Split motion vector decoding (16.4): choose a partitioning (16x8, 8x16,
 * 8x8 or 4x4), then decode each partition's MV as new/above/left relative
 * to its neighbours.  Returns the number of MVs parsed (2, 4 or 16).
 * NOTE(review): this extracted copy is missing the first signature line
 * (doxygen line 844) and doxygen lines 864-866 (the partition-type tree
 * reads that distinguish 16x8/8x16 from 8x8 below); confirm against the
 * full source. */
843 static av_always_inline
845  int layout, int is_vp7)
846 {
847  int part_idx;
848  int n, num;
849  VP8Macroblock *top_mb;
850  VP8Macroblock *left_mb = &mb[-1];
851  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
852  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
853  VP56mv *top_mv;
854  VP56mv *left_mv = left_mb->bmv;
855  VP56mv *cur_mv = mb->bmv;
856 
857  if (!layout) // layout is inlined, s->mb_layout is not
858  top_mb = &mb[2];
859  else
860  top_mb = &mb[-s->mb_width - 1];
861  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
862  top_mv = top_mb->bmv;
863 
867  else
868  part_idx = VP8_SPLITMVMODE_8x8;
869  } else {
870  part_idx = VP8_SPLITMVMODE_4x4;
871  }
872 
873  num = vp8_mbsplit_count[part_idx];
874  mbsplits_cur = vp8_mbsplits[part_idx],
875  firstidx = vp8_mbfirstidx[part_idx];
876  mb->partitioning = part_idx;
877 
878  for (n = 0; n < num; n++) {
879  int k = firstidx[n];
880  uint32_t left, above;
881  const uint8_t *submv_prob;
882 
883  if (!(k & 3))
884  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
885  else
886  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
887  if (k <= 3)
888  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
889  else
890  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
891 
892  submv_prob = get_submv_prob(left, above, is_vp7);
893 
894  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
895  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
896  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
897  mb->bmv[n].y = mb->mv.y +
898  read_mv_component(c, s->prob->mvc[0], is_vp7);
899  mb->bmv[n].x = mb->mv.x +
900  read_mv_component(c, s->prob->mvc[1], is_vp7);
901  } else {
902  AV_ZERO32(&mb->bmv[n]);
903  }
904  } else {
905  AV_WN32A(&mb->bmv[n], above);
906  }
907  } else {
908  AV_WN32A(&mb->bmv[n], left);
909  }
910  }
911 
912  return num;
913 }
914 
915 /**
916  * The vp7 reference decoder uses a padding macroblock column (added to right
917  * edge of the frame) to guard against illegal macroblock offsets. The
918  * algorithm has bugs that permit offsets to straddle the padding column.
919  * This function replicates those bugs.
920  *
921  * @param[out] edge_x macroblock x address
922  * @param[out] edge_y macroblock y address
923  *
924  * @return macroblock offset legal (boolean)
925  */
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
                                   int xoffset, int yoffset, int boundary,
                                   int *edge_x, int *edge_y)
{
    /* The virtual width includes the one-MB padding column. */
    int vwidth = mb_width + 1;
    int linear = (mb_y + yoffset) * vwidth + mb_x + xoffset;

    /* Reject offsets before the boundary or landing in the padding
     * column (the last column of the virtual width). */
    if (linear < boundary || linear % vwidth == vwidth - 1)
        return 0;

    *edge_y = linear / vwidth;
    *edge_x = linear % vwidth;
    return 1;
}
938 
939 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
940 {
941  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
942 }
943 
/* VP7 inter MV decoding: score candidate predictors from up to
 * VP7_MV_PRED_COUNT neighbouring macroblocks (replicating the reference
 * decoder's buggy padding-column handling via vp7_calculate_mb_offset),
 * then decode the mode (zero / nearest / near / new / split) using
 * score-indexed mode contexts.
 * NOTE(review): this extracted copy is missing the first signature line
 * (doxygen line 945) and doxygen line 998 (presumably the mb->mode /
 * near-MV clamping setup before the first branch); confirm against the
 * full source. */
944 static av_always_inline
946  int mb_x, int mb_y, int layout)
947 {
948  VP8Macroblock *mb_edge[12];
949  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
950  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
951  int idx = CNT_ZERO;
952  VP56mv near_mv[3];
953  uint8_t cnt[3] = { 0 };
954  VP56RangeCoder *c = &s->c;
955  int i;
956 
957  AV_ZERO32(&near_mv[0]);
958  AV_ZERO32(&near_mv[1]);
959  AV_ZERO32(&near_mv[2]);
960 
961  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
962  const VP7MVPred * pred = &vp7_mv_pred[i];
963  int edge_x, edge_y;
964 
965  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
966  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
967  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
968  ? s->macroblocks_base + 1 + edge_x +
969  (s->mb_width + 1) * (edge_y + 1)
970  : s->macroblocks + edge_x +
971  (s->mb_height - edge_y - 1) * 2;
972  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
973  if (mv) {
974  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
975  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
976  idx = CNT_NEAREST;
977  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
978  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
979  continue;
980  idx = CNT_NEAR;
981  } else {
982  AV_WN32A(&near_mv[CNT_NEAR], mv);
983  idx = CNT_NEAR;
984  }
985  } else {
986  AV_WN32A(&near_mv[CNT_NEAREST], mv);
987  idx = CNT_NEAREST;
988  }
989  } else {
990  idx = CNT_ZERO;
991  }
992  } else {
993  idx = CNT_ZERO;
994  }
995  cnt[idx] += vp7_mv_pred[i].score;
996  }
997 
999 
1000  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
1001  mb->mode = VP8_MVMODE_MV;
1002 
1003  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
1004 
1005  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
1006 
1007  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
1008  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1009  else
1010  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
1011 
1012  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
1013  mb->mode = VP8_MVMODE_SPLIT;
1014  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
1015  } else {
1016  mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
1017  mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
1018  mb->bmv[0] = mb->mv;
1019  }
1020  } else {
1021  mb->mv = near_mv[CNT_NEAR];
1022  mb->bmv[0] = mb->mv;
1023  }
1024  } else {
1025  mb->mv = near_mv[CNT_NEAREST];
1026  mb->bmv[0] = mb->mv;
1027  }
1028  } else {
1029  mb->mode = VP8_MVMODE_ZERO;
1030  AV_ZERO32(&mb->mv);
1031  mb->bmv[0] = mb->mv;
1032  }
1033 }
1034 
/* VP8 inter MV decoding: gather zero/nearest/near candidate MVs from the
 * top, left and top-left neighbours (with a SWAR sign flip when the
 * reference frames' sign biases differ), then decode the mode
 * (zero / nearest / near / new / split) using count-indexed contexts.
 * NOTE(review): this extracted copy is missing the first signature line
 * (doxygen line 1036) and doxygen line 1089 (presumably the mbskip /
 * clamp setup before the first mode branch); confirm against the full
 * source. */
1035 static av_always_inline
1037  int mb_x, int mb_y, int layout)
1038 {
1039  VP8Macroblock *mb_edge[3] = { 0 /* top */,
1040  mb - 1 /* left */,
1041  0 /* top-left */ };
1042  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1043  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1044  int idx = CNT_ZERO;
1045  int cur_sign_bias = s->sign_bias[mb->ref_frame];
1046  int8_t *sign_bias = s->sign_bias;
1047  VP56mv near_mv[4];
1048  uint8_t cnt[4] = { 0 };
1049  VP56RangeCoder *c = &s->c;
1050 
1051  if (!layout) { // layout is inlined (s->mb_layout is not)
1052  mb_edge[0] = mb + 2;
1053  mb_edge[2] = mb + 1;
1054  } else {
1055  mb_edge[0] = mb - s->mb_width - 1;
1056  mb_edge[2] = mb - s->mb_width - 2;
1057  }
1058 
1059  AV_ZERO32(&near_mv[0]);
1060  AV_ZERO32(&near_mv[1]);
1061  AV_ZERO32(&near_mv[2]);
1062 
1063  /* Process MB on top, left and top-left */
1064 #define MV_EDGE_CHECK(n) \
1065  { \
1066  VP8Macroblock *edge = mb_edge[n]; \
1067  int edge_ref = edge->ref_frame; \
1068  if (edge_ref != VP56_FRAME_CURRENT) { \
1069  uint32_t mv = AV_RN32A(&edge->mv); \
1070  if (mv) { \
1071  if (cur_sign_bias != sign_bias[edge_ref]) { \
1072  /* SWAR negate of the values in mv. */ \
1073  mv = ~mv; \
1074  mv = ((mv & 0x7fff7fff) + \
1075  0x00010001) ^ (mv & 0x80008000); \
1076  } \
1077  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1078  AV_WN32A(&near_mv[++idx], mv); \
1079  cnt[idx] += 1 + (n != 2); \
1080  } else \
1081  cnt[CNT_ZERO] += 1 + (n != 2); \
1082  } \
1083  }
1084 
1085  MV_EDGE_CHECK(0)
1086  MV_EDGE_CHECK(1)
1087  MV_EDGE_CHECK(2)
1088 
1090  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1091  mb->mode = VP8_MVMODE_MV;
1092 
1093  /* If we have three distinct MVs, merge first and last if they're the same */
1094  if (cnt[CNT_SPLITMV] &&
1095  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1096  cnt[CNT_NEAREST] += 1;
1097 
1098  /* Swap near and nearest if necessary */
1099  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1100  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1101  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1102  }
1103 
1104  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1105  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1106  /* Choose the best mv out of 0,0 and the nearest mv */
1107  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1108  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1109  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1110  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1111 
1112  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1113  mb->mode = VP8_MVMODE_SPLIT;
1114  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1115  } else {
1116  mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
1117  mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
1118  mb->bmv[0] = mb->mv;
1119  }
1120  } else {
1121  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAR]);
1122  mb->bmv[0] = mb->mv;
1123  }
1124  } else {
1125  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAREST]);
1126  mb->bmv[0] = mb->mv;
1127  }
1128  } else {
1129  mb->mode = VP8_MVMODE_ZERO;
1130  AV_ZERO32(&mb->mv);
1131  mb->bmv[0] = mb->mv;
1132  }
1133 }
1134 
1135 static av_always_inline
1137  int mb_x, int keyframe, int layout)
1138 {
1139  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1140 
1141  if (layout) {
1142  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1143  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1144  }
1145  if (keyframe) {
1146  int x, y;
1147  uint8_t *top;
1148  uint8_t *const left = s->intra4x4_pred_mode_left;
1149  if (layout)
1150  top = mb->intra4x4_pred_mode_top;
1151  else
1152  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1153  for (y = 0; y < 4; y++) {
1154  for (x = 0; x < 4; x++) {
1155  const uint8_t *ctx;
1156  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1157  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1158  left[y] = top[x] = *intra4x4;
1159  intra4x4++;
1160  }
1161  }
1162  } else {
1163  int i;
1164  for (i = 0; i < 16; i++)
1165  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
1167  }
1168 }
1169 
1170 static av_always_inline
1172  VP8Macroblock *mb, int mb_x, int mb_y,
1173  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1174 {
1175  VP56RangeCoder *c = &s->c;
1176  static const char * const vp7_feature_name[] = { "q-index",
1177  "lf-delta",
1178  "partial-golden-update",
1179  "blit-pitch" };
1180  if (is_vp7) {
1181  int i;
1182  *segment = 0;
1183  for (i = 0; i < 4; i++) {
1184  if (s->feature_enabled[i]) {
1187  s->feature_index_prob[i]);
1189  "Feature %s present in macroblock (value 0x%x)\n",
1190  vp7_feature_name[i], s->feature_value[i][index]);
1191  }
1192  }
1193  }
1194  } else if (s->segmentation.update_map) {
1195  int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
1196  *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
1197  } else if (s->segmentation.enabled)
1198  *segment = ref ? *ref : *segment;
1199  mb->segment = *segment;
1200 
1201  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1202 
1203  if (s->keyframe) {
1206 
1207  if (mb->mode == MODE_I4x4) {
1208  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1209  } else {
1210  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1211  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1212  if (s->mb_layout)
1213  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1214  else
1215  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1216  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1217  }
1218 
1222  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1223  // inter MB, 16.2
1224  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1225  mb->ref_frame =
1226  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
1228  else
1230  s->ref_count[mb->ref_frame - 1]++;
1231 
1232  // motion vectors, 16.3
1233  if (is_vp7)
1234  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1235  else
1236  vp8_decode_mvs(s, mv_bounds, mb, mb_x, mb_y, layout);
1237  } else {
1238  // intra MB, 16.1
1240 
1241  if (mb->mode == MODE_I4x4)
1242  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1243 
1245  s->prob->pred8x8c);
1248  AV_ZERO32(&mb->bmv[0]);
1249  }
1250 }
1251 
1252 /**
1253  * @param r arithmetic bitstream reader context
1254  * @param block destination for block coefficients
1255  * @param probs probabilities to use when reading trees from the bitstream
1256  * @param i initial coeff index, 0 unless a separate DC block is coded
1257  * @param qmul array holding the dc/ac dequant factor at position 0/1
1258  *
1259  * @return 0 if no coeffs were decoded
1260  * otherwise, the index of the last coeff decoded plus one
1261  */
1262 static av_always_inline
1264  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1265  int i, uint8_t *token_prob, int16_t qmul[2],
1266  const uint8_t scan[16], int vp7)
1267 {
1268  VP56RangeCoder c = *r;
1269  goto skip_eob;
1270  do {
1271  int coeff;
1272 restart:
1273  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1274  break;
1275 
1276 skip_eob:
1277  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1278  if (++i == 16)
1279  break; // invalid input; blocks should end with EOB
1280  token_prob = probs[i][0];
1281  if (vp7)
1282  goto restart;
1283  goto skip_eob;
1284  }
1285 
1286  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1287  coeff = 1;
1288  token_prob = probs[i + 1][1];
1289  } else {
1290  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1291  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1292  if (coeff)
1293  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1294  coeff += 2;
1295  } else {
1296  // DCT_CAT*
1297  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1298  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1299  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1300  } else { // DCT_CAT2
1301  coeff = 7;
1302  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1303  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1304  }
1305  } else { // DCT_CAT3 and up
1306  int a = vp56_rac_get_prob(&c, token_prob[8]);
1307  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1308  int cat = (a << 1) + b;
1309  coeff = 3 + (8 << cat);
1310  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1311  }
1312  }
1313  token_prob = probs[i + 1][2];
1314  }
1315  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1316  } while (++i < 16);
1317 
1318  *r = c;
1319  return i;
1320 }
1321 
1322 static av_always_inline
1323 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1324 {
1325  int16_t dc = block[0];
1326  int ret = 0;
1327 
1328  if (pred[1] > 3) {
1329  dc += pred[0];
1330  ret = 1;
1331  }
1332 
1333  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1334  block[0] = pred[0] = dc;
1335  pred[1] = 0;
1336  } else {
1337  if (pred[0] == dc)
1338  pred[1]++;
1339  block[0] = pred[0] = dc;
1340  }
1341 
1342  return ret;
1343 }
1344 
1346  int16_t block[16],
1347  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1348  int i, uint8_t *token_prob,
1349  int16_t qmul[2],
1350  const uint8_t scan[16])
1351 {
1352  return decode_block_coeffs_internal(r, block, probs, i,
1353  token_prob, qmul, scan, IS_VP7);
1354 }
1355 
1356 #ifndef vp8_decode_block_coeffs_internal
1358  int16_t block[16],
1359  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1360  int i, uint8_t *token_prob,
1361  int16_t qmul[2])
1362 {
1363  return decode_block_coeffs_internal(r, block, probs, i,
1364  token_prob, qmul, ff_zigzag_scan, IS_VP8);
1365 }
1366 #endif
1367 
1368 /**
1369  * @param c arithmetic bitstream reader context
1370  * @param block destination for block coefficients
1371  * @param probs probabilities to use when reading trees from the bitstream
1372  * @param i initial coeff index, 0 unless a separate DC block is coded
1373  * @param zero_nhood the initial prediction context for number of surrounding
1374  * all-zero blocks (only left/top, so 0-2)
1375  * @param qmul array holding the dc/ac dequant factor at position 0/1
1376  * @param scan scan pattern (VP7 only)
1377  *
1378  * @return 0 if no coeffs were decoded
1379  * otherwise, the index of the last coeff decoded plus one
1380  */
1381 static av_always_inline
1383  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1384  int i, int zero_nhood, int16_t qmul[2],
1385  const uint8_t scan[16], int vp7)
1386 {
1387  uint8_t *token_prob = probs[i][zero_nhood];
1388  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1389  return 0;
1390  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1391  token_prob, qmul, scan)
1392  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1393  token_prob, qmul);
1394 }
1395 
1396 static av_always_inline
1398  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1399  int is_vp7)
1400 {
1401  int i, x, y, luma_start = 0, luma_ctx = 3;
1402  int nnz_pred, nnz, nnz_total = 0;
1403  int segment = mb->segment;
1404  int block_dc = 0;
1405 
1406  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1407  nnz_pred = t_nnz[8] + l_nnz[8];
1408 
1409  // decode DC values and do hadamard
1410  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1411  nnz_pred, s->qmat[segment].luma_dc_qmul,
1412  ff_zigzag_scan, is_vp7);
1413  l_nnz[8] = t_nnz[8] = !!nnz;
1414 
1415  if (is_vp7 && mb->mode > MODE_I4x4) {
1416  nnz |= inter_predict_dc(td->block_dc,
1417  s->inter_dc_pred[mb->ref_frame - 1]);
1418  }
1419 
1420  if (nnz) {
1421  nnz_total += nnz;
1422  block_dc = 1;
1423  if (nnz == 1)
1424  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1425  else
1426  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1427  }
1428  luma_start = 1;
1429  luma_ctx = 0;
1430  }
1431 
1432  // luma blocks
1433  for (y = 0; y < 4; y++)
1434  for (x = 0; x < 4; x++) {
1435  nnz_pred = l_nnz[y] + t_nnz[x];
1436  nnz = decode_block_coeffs(c, td->block[y][x],
1437  s->prob->token[luma_ctx],
1438  luma_start, nnz_pred,
1439  s->qmat[segment].luma_qmul,
1440  s->prob[0].scan, is_vp7);
1441  /* nnz+block_dc may be one more than the actual last index,
1442  * but we don't care */
1443  td->non_zero_count_cache[y][x] = nnz + block_dc;
1444  t_nnz[x] = l_nnz[y] = !!nnz;
1445  nnz_total += nnz;
1446  }
1447 
1448  // chroma blocks
1449  // TODO: what to do about dimensions? 2nd dim for luma is x,
1450  // but for chroma it's (y<<1)|x
1451  for (i = 4; i < 6; i++)
1452  for (y = 0; y < 2; y++)
1453  for (x = 0; x < 2; x++) {
1454  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1455  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1456  s->prob->token[2], 0, nnz_pred,
1457  s->qmat[segment].chroma_qmul,
1458  s->prob[0].scan, is_vp7);
1459  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1460  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1461  nnz_total += nnz;
1462  }
1463 
1464  // if there were no coded coeffs despite the macroblock not being marked skip,
1465  // we MUST not do the inner loop filter and should not do IDCT
1466  // Since skip isn't used for bitstream prediction, just manually set it.
1467  if (!nnz_total)
1468  mb->skip = 1;
1469 }
1470 
/**
 * Save the bottom edge of the current macroblock row into the top-border
 * cache, for use as the top edge when predicting the row below.
 * In simple-filter mode only luma is needed.
 */
static av_always_inline
void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
                      uint8_t *src_cb, uint8_t *src_cr,
                      ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
{
    // last luma row of the 16x16 macroblock
    AV_COPY128(top_border, src_y + 15 * linesize);
    if (simple)
        return;
    // last chroma rows of the 8x8 blocks
    AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
    AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
}
1482 
/*
 * Exchange (xchg=1) or copy (xchg=0) the pixels above the current macroblock
 * with the cached top-border row, so that intra prediction for this
 * macroblock sees the *unfiltered* top edge while the frame keeps the
 * filtered pixels. Called once with xchg=1 before prediction and once with
 * xchg=0 afterwards (see intra_predict).
 * NOTE(review): the swap/copy order below is significant — top-left,
 * top, and top-right regions alias the same cached row; do not reorder.
 */
static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
                    uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x,
                    int mb_y, int mb_width, int simple, int xchg)
{
    uint8_t *top_border_m1 = top_border - 32;     // for TL prediction
    // step back one row: we operate on the row above the macroblock
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a, b, xchg) \
    do { \
        if (xchg) \
            AV_SWAP64(b, a); \
        else \
            AV_COPY64(b, a); \
    } while (0)

    XCHG(top_border_m1 + 8, src_y - 8, xchg);
    XCHG(top_border, src_y, xchg);
    XCHG(top_border + 8, src_y + 8, 1);
    // top-right pixels only exist while there is a macroblock to the right
    if (mb_x < mb_width - 1)
        XCHG(top_border + 32, src_y + 16, 1);

    // only copy chroma for normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1 + 16, src_cb - 8, xchg);
        XCHG(top_border_m1 + 24, src_cr - 8, xchg);
        XCHG(top_border + 16, src_cb, 1);
        XCHG(top_border + 24, src_cr, 1);
    }
}
1516 
/**
 * Adjust a 16x16/8x8 DC prediction mode at the frame edges, where one or
 * both of the top/left neighbours are unavailable.
 */
static av_always_inline
int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (mb_x && mb_y)
        return mode;               // both neighbours available
    if (mb_x)
        return LEFT_DC_PRED8x8;    // top row: only the left edge exists
    return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8; // left column / corner
}
1525 
/**
 * Adjust a TM (TrueMotion) 8x8 prediction mode at the frame edges; VP7 and
 * VP8 differ in the DC fill value used at the top-left corner.
 */
static av_always_inline
int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
{
    if (mb_x)
        return mb_y ? mode : HOR_PRED8x8; // no top row -> horizontal
    if (mb_y)
        return VERT_PRED8x8;              // no left column -> vertical
    return vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8; // corner -> flat DC
}
1534 
1535 static av_always_inline
1536 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1537 {
1538  switch (mode) {
1539  case DC_PRED8x8:
1540  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1541  case VERT_PRED8x8:
1542  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1543  case HOR_PRED8x8:
1544  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1545  case PLANE_PRED8x8: /* TM */
1546  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1547  }
1548  return mode;
1549 }
1550 
/**
 * Adjust a TM (TrueMotion) 4x4 prediction mode at the frame edges;
 * the 4x4 variant uses the VP8-specific vertical/horizontal modes.
 */
static av_always_inline
int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
{
    if (mb_x)
        return mb_y ? mode : HOR_VP8_PRED;  // no top row
    if (mb_y)
        return VERT_VP8_PRED;               // no left column
    return vp7 ? DC_128_PRED : DC_129_PRED; // top-left corner
}
1560 
1561 static av_always_inline
1562 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1563  int *copy_buf, int vp7)
1564 {
1565  switch (mode) {
1566  case VERT_PRED:
1567  if (!mb_x && mb_y) {
1568  *copy_buf = 1;
1569  return mode;
1570  }
1571  /* fall-through */
1572  case DIAG_DOWN_LEFT_PRED:
1573  case VERT_LEFT_PRED:
1574  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1575  case HOR_PRED:
1576  if (!mb_y) {
1577  *copy_buf = 1;
1578  return mode;
1579  }
1580  /* fall-through */
1581  case HOR_UP_PRED:
1582  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1583  case TM_VP8_PRED:
1584  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1585  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1586  * as 16x16/8x8 DC */
1587  case DIAG_DOWN_RIGHT_PRED:
1588  case VERT_RIGHT_PRED:
1589  case HOR_DOWN_PRED:
1590  if (!mb_y || !mb_x)
1591  *copy_buf = 1;
1592  return mode;
1593  }
1594  return mode;
1595 }
1596 
1597 static av_always_inline
1599  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1600 {
1601  int x, y, mode, nnz;
1602  uint32_t tr;
1603 
1604  /* for the first row, we need to run xchg_mb_border to init the top edge
1605  * to 127 otherwise, skip it if we aren't going to deblock */
1606  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1607  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1608  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1609  s->filter.simple, 1);
1610 
1611  if (mb->mode < MODE_I4x4) {
1612  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1613  s->hpc.pred16x16[mode](dst[0], s->linesize);
1614  } else {
1615  uint8_t *ptr = dst[0];
1616  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1617  const uint8_t lo = is_vp7 ? 128 : 127;
1618  const uint8_t hi = is_vp7 ? 128 : 129;
1619  uint8_t tr_top[4] = { lo, lo, lo, lo };
1620 
1621  // all blocks on the right edge of the macroblock use bottom edge
1622  // the top macroblock for their topright edge
1623  uint8_t *tr_right = ptr - s->linesize + 16;
1624 
1625  // if we're on the right edge of the frame, said edge is extended
1626  // from the top macroblock
1627  if (mb_y && mb_x == s->mb_width - 1) {
1628  tr = tr_right[-1] * 0x01010101u;
1629  tr_right = (uint8_t *) &tr;
1630  }
1631 
1632  if (mb->skip)
1634 
1635  for (y = 0; y < 4; y++) {
1636  uint8_t *topright = ptr + 4 - s->linesize;
1637  for (x = 0; x < 4; x++) {
1638  int copy = 0;
1639  ptrdiff_t linesize = s->linesize;
1640  uint8_t *dst = ptr + 4 * x;
1641  LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);
1642 
1643  if ((y == 0 || x == 3) && mb_y == 0) {
1644  topright = tr_top;
1645  } else if (x == 3)
1646  topright = tr_right;
1647 
1648  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1649  mb_y + y, &copy, is_vp7);
1650  if (copy) {
1651  dst = copy_dst + 12;
1652  linesize = 8;
1653  if (!(mb_y + y)) {
1654  copy_dst[3] = lo;
1655  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1656  } else {
1657  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1658  if (!(mb_x + x)) {
1659  copy_dst[3] = hi;
1660  } else {
1661  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1662  }
1663  }
1664  if (!(mb_x + x)) {
1665  copy_dst[11] =
1666  copy_dst[19] =
1667  copy_dst[27] =
1668  copy_dst[35] = hi;
1669  } else {
1670  copy_dst[11] = ptr[4 * x - 1];
1671  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1672  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1673  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1674  }
1675  }
1676  s->hpc.pred4x4[mode](dst, topright, linesize);
1677  if (copy) {
1678  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1679  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1680  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1681  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1682  }
1683 
1684  nnz = td->non_zero_count_cache[y][x];
1685  if (nnz) {
1686  if (nnz == 1)
1687  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1688  td->block[y][x], s->linesize);
1689  else
1690  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1691  td->block[y][x], s->linesize);
1692  }
1693  topright += 4;
1694  }
1695 
1696  ptr += 4 * s->linesize;
1697  intra4x4 += 4;
1698  }
1699  }
1700 
1702  mb_x, mb_y, is_vp7);
1703  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1704  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1705 
1706  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1707  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1708  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1709  s->filter.simple, 0);
1710 }
1711 
/* Per subpel MV phase (mv & 7): row [0] gives the number of extra pixels
 * needed on the left (which doubles as the mc_func table index), row [1]
 * the total number of extra pixels the filter reads, row [2] the extra
 * pixels needed on the right/bottom. Phase 0 is full-pel (no filtering). */
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
1718 
1719 /**
1720  * luma MC function
1721  *
1722  * @param s VP8 decoding context
1723  * @param dst target buffer for block data at block position
1724  * @param ref reference picture buffer at origin (0, 0)
1725  * @param mv motion vector (relative to block position) to get pixel data from
1726  * @param x_off horizontal position of block from origin (0, 0)
1727  * @param y_off vertical position of block from origin (0, 0)
1728  * @param block_w width of block (16, 8 or 4)
1729  * @param block_h height of block (always same as block_w)
1730  * @param width width of src/dst plane data
1731  * @param height height of src/dst plane data
1732  * @param linesize size of a single line of plane data, including padding
1733  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1734  */
1735 static av_always_inline
1737  ThreadFrame *ref, const VP56mv *mv,
1738  int x_off, int y_off, int block_w, int block_h,
1739  int width, int height, ptrdiff_t linesize,
1740  vp8_mc_func mc_func[3][3])
1741 {
1742  uint8_t *src = ref->f->data[0];
1743 
1744  if (AV_RN32A(mv)) {
1745  ptrdiff_t src_linesize = linesize;
1746 
1747  int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1748  int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1749 
1750  x_off += mv->x >> 2;
1751  y_off += mv->y >> 2;
1752 
1753  // edge emulation
1754  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1755  src += y_off * linesize + x_off;
1756  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1757  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1759  src - my_idx * linesize - mx_idx,
1760  EDGE_EMU_LINESIZE, linesize,
1761  block_w + subpel_idx[1][mx],
1762  block_h + subpel_idx[1][my],
1763  x_off - mx_idx, y_off - my_idx,
1764  width, height);
1765  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1766  src_linesize = EDGE_EMU_LINESIZE;
1767  }
1768  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1769  } else {
1770  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1771  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1772  linesize, block_h, 0, 0);
1773  }
1774 }
1775 
1776 /**
1777  * chroma MC function
1778  *
1779  * @param s VP8 decoding context
1780  * @param dst1 target buffer for block data at block position (U plane)
1781  * @param dst2 target buffer for block data at block position (V plane)
1782  * @param ref reference picture buffer at origin (0, 0)
1783  * @param mv motion vector (relative to block position) to get pixel data from
1784  * @param x_off horizontal position of block from origin (0, 0)
1785  * @param y_off vertical position of block from origin (0, 0)
1786  * @param block_w width of block (16, 8 or 4)
1787  * @param block_h height of block (always same as block_w)
1788  * @param width width of src/dst plane data
1789  * @param height height of src/dst plane data
1790  * @param linesize size of a single line of plane data, including padding
1791  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1792  */
1793 static av_always_inline
1795  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1796  int x_off, int y_off, int block_w, int block_h,
1797  int width, int height, ptrdiff_t linesize,
1798  vp8_mc_func mc_func[3][3])
1799 {
1800  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1801 
1802  if (AV_RN32A(mv)) {
1803  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1804  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1805 
1806  x_off += mv->x >> 3;
1807  y_off += mv->y >> 3;
1808 
1809  // edge emulation
1810  src1 += y_off * linesize + x_off;
1811  src2 += y_off * linesize + x_off;
1812  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1813  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1814  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1816  src1 - my_idx * linesize - mx_idx,
1817  EDGE_EMU_LINESIZE, linesize,
1818  block_w + subpel_idx[1][mx],
1819  block_h + subpel_idx[1][my],
1820  x_off - mx_idx, y_off - my_idx, width, height);
1821  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1822  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1823 
1825  src2 - my_idx * linesize - mx_idx,
1826  EDGE_EMU_LINESIZE, linesize,
1827  block_w + subpel_idx[1][mx],
1828  block_h + subpel_idx[1][my],
1829  x_off - mx_idx, y_off - my_idx, width, height);
1830  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1831  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1832  } else {
1833  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1834  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1835  }
1836  } else {
1837  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1838  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1839  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1840  }
1841 }
1842 
1843 static av_always_inline
1845  ThreadFrame *ref_frame, int x_off, int y_off,
1846  int bx_off, int by_off, int block_w, int block_h,
1847  int width, int height, VP56mv *mv)
1848 {
1849  VP56mv uvmv = *mv;
1850 
1851  /* Y */
1852  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1853  ref_frame, mv, x_off + bx_off, y_off + by_off,
1854  block_w, block_h, width, height, s->linesize,
1855  s->put_pixels_tab[block_w == 8]);
1856 
1857  /* U/V */
1858  if (s->profile == 3) {
1859  /* this block only applies VP8; it is safe to check
1860  * only the profile, as VP7 profile <= 1 */
1861  uvmv.x &= ~7;
1862  uvmv.y &= ~7;
1863  }
1864  x_off >>= 1;
1865  y_off >>= 1;
1866  bx_off >>= 1;
1867  by_off >>= 1;
1868  width >>= 1;
1869  height >>= 1;
1870  block_w >>= 1;
1871  block_h >>= 1;
1872  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1873  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1874  &uvmv, x_off + bx_off, y_off + by_off,
1875  block_w, block_h, width, height, s->uvlinesize,
1876  s->put_pixels_tab[1 + (block_w == 4)]);
1877 }
1878 
1879 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1880  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1881 static av_always_inline
1882 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1883  int mb_xy, int ref)
1884 {
1885  /* Don't prefetch refs that haven't been used very often this frame. */
1886  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1887  int x_off = mb_x << 4, y_off = mb_y << 4;
1888  int mx = (mb->mv.x >> 2) + x_off + 8;
1889  int my = (mb->mv.y >> 2) + y_off;
1890  uint8_t **src = s->framep[ref]->tf.f->data;
1891  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1892  /* For threading, a ff_thread_await_progress here might be useful, but
1893  * it actually slows down the decoder. Since a bad prefetch doesn't
1894  * generate bad decoder output, we don't run it here. */
1895  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1896  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1897  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1898  }
1899 }
1900 
1901 /**
1902  * Apply motion vectors to prediction buffer, chapter 18.
1903  */
1904 static av_always_inline
1906  VP8Macroblock *mb, int mb_x, int mb_y)
1907 {
1908  int x_off = mb_x << 4, y_off = mb_y << 4;
1909  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1910  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1911  VP56mv *bmv = mb->bmv;
1912 
1913  switch (mb->partitioning) {
1914  case VP8_SPLITMVMODE_NONE:
1915  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1916  0, 0, 16, 16, width, height, &mb->mv);
1917  break;
1918  case VP8_SPLITMVMODE_4x4: {
1919  int x, y;
1920  VP56mv uvmv;
1921 
1922  /* Y */
1923  for (y = 0; y < 4; y++) {
1924  for (x = 0; x < 4; x++) {
1925  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1926  ref, &bmv[4 * y + x],
1927  4 * x + x_off, 4 * y + y_off, 4, 4,
1928  width, height, s->linesize,
1929  s->put_pixels_tab[2]);
1930  }
1931  }
1932 
1933  /* U/V */
1934  x_off >>= 1;
1935  y_off >>= 1;
1936  width >>= 1;
1937  height >>= 1;
1938  for (y = 0; y < 2; y++) {
1939  for (x = 0; x < 2; x++) {
1940  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1941  mb->bmv[2 * y * 4 + 2 * x + 1].x +
1942  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1943  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1944  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1945  mb->bmv[2 * y * 4 + 2 * x + 1].y +
1946  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1947  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
1948  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
1949  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
1950  if (s->profile == 3) {
1951  uvmv.x &= ~7;
1952  uvmv.y &= ~7;
1953  }
1954  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
1955  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
1956  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1957  width, height, s->uvlinesize,
1958  s->put_pixels_tab[2]);
1959  }
1960  }
1961  break;
1962  }
1963  case VP8_SPLITMVMODE_16x8:
1964  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1965  0, 0, 16, 8, width, height, &bmv[0]);
1966  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1967  0, 8, 16, 8, width, height, &bmv[1]);
1968  break;
1969  case VP8_SPLITMVMODE_8x16:
1970  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1971  0, 0, 8, 16, width, height, &bmv[0]);
1972  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1973  8, 0, 8, 16, width, height, &bmv[1]);
1974  break;
1975  case VP8_SPLITMVMODE_8x8:
1976  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1977  0, 0, 8, 8, width, height, &bmv[0]);
1978  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1979  8, 0, 8, 8, width, height, &bmv[1]);
1980  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1981  0, 8, 8, 8, width, height, &bmv[2]);
1982  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1983  8, 8, 8, 8, width, height, &bmv[3]);
1984  break;
1985  }
1986 }
1987 
1988 static av_always_inline
1990 {
1991  int x, y, ch;
1992 
1993  if (mb->mode != MODE_I4x4) {
1994  uint8_t *y_dst = dst[0];
1995  for (y = 0; y < 4; y++) {
1996  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
1997  if (nnz4) {
1998  if (nnz4 & ~0x01010101) {
1999  for (x = 0; x < 4; x++) {
2000  if ((uint8_t) nnz4 == 1)
2001  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
2002  td->block[y][x],
2003  s->linesize);
2004  else if ((uint8_t) nnz4 > 1)
2005  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
2006  td->block[y][x],
2007  s->linesize);
2008  nnz4 >>= 8;
2009  if (!nnz4)
2010  break;
2011  }
2012  } else {
2013  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
2014  }
2015  }
2016  y_dst += 4 * s->linesize;
2017  }
2018  }
2019 
2020  for (ch = 0; ch < 2; ch++) {
2021  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
2022  if (nnz4) {
2023  uint8_t *ch_dst = dst[1 + ch];
2024  if (nnz4 & ~0x01010101) {
2025  for (y = 0; y < 2; y++) {
2026  for (x = 0; x < 2; x++) {
2027  if ((uint8_t) nnz4 == 1)
2028  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
2029  td->block[4 + ch][(y << 1) + x],
2030  s->uvlinesize);
2031  else if ((uint8_t) nnz4 > 1)
2032  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
2033  td->block[4 + ch][(y << 1) + x],
2034  s->uvlinesize);
2035  nnz4 >>= 8;
2036  if (!nnz4)
2037  goto chroma_idct_end;
2038  }
2039  ch_dst += 4 * s->uvlinesize;
2040  }
2041  } else {
2042  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
2043  }
2044  }
2045 chroma_idct_end:
2046  ;
2047  }
2048 }
2049 
2050 static av_always_inline
2052  VP8FilterStrength *f, int is_vp7)
2053 {
2054  int interior_limit, filter_level;
2055 
2056  if (s->segmentation.enabled) {
2057  filter_level = s->segmentation.filter_level[mb->segment];
2058  if (!s->segmentation.absolute_vals)
2059  filter_level += s->filter.level;
2060  } else
2061  filter_level = s->filter.level;
2062 
2063  if (s->lf_delta.enabled) {
2064  filter_level += s->lf_delta.ref[mb->ref_frame];
2065  filter_level += s->lf_delta.mode[mb->mode];
2066  }
2067 
2068  filter_level = av_clip_uintp2(filter_level, 6);
2069 
2070  interior_limit = filter_level;
2071  if (s->filter.sharpness) {
2072  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2073  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2074  }
2075  interior_limit = FFMAX(interior_limit, 1);
2076 
2077  f->filter_level = filter_level;
2078  f->inner_limit = interior_limit;
2079  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2080  mb->mode == VP8_MVMODE_SPLIT;
2081 }
2082 
/* Apply the normal (non-simple) in-loop deblocking filter to one macroblock:
 * strong filtering of the left and top macroblock edges, plus — when
 * f->inner_filter is set — filtering of the interior 4x4 block edges of the
 * luma and chroma planes.  VP7 and VP8 use different edge limits and run the
 * horizontal inner filter at different points (see the two
 * H_LOOP_FILTER_16Y_INNER invocations below).
 * NOTE(review): the signature line carrying the function name (filter_mb) and
 * its leading parameters was dropped during extraction — confirm against the
 * original source. */
2083 static av_always_inline
2085  int mb_x, int mb_y, int is_vp7)
2086 {
2087  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2088  int filter_level = f->filter_level;
2089  int inner_limit = f->inner_limit;
2090  int inner_filter = f->inner_filter;
2091  ptrdiff_t linesize = s->linesize;
2092  ptrdiff_t uvlinesize = s->uvlinesize;
    /* High-edge-variance threshold, indexed by [keyframe][filter_level];
     * keyframes use the slightly more aggressive first row. */
2093  static const uint8_t hev_thresh_lut[2][64] = {
2094  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2095  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2096  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2097  3, 3, 3, 3 },
2098  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2099  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2100  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2101  2, 2, 2, 2 }
2102  };
2103 
    /* A zero filter level means deblocking is disabled for this macroblock. */
2104  if (!filter_level)
2105  return;
2106 
    /* Edge limits: VP7 derives them directly from the filter level, VP8 also
     * folds in the per-macroblock inner limit. */
2107  if (is_vp7) {
2108  bedge_lim_y = filter_level;
2109  bedge_lim_uv = filter_level * 2;
2110  mbedge_lim = filter_level + 2;
2111  } else {
2112  bedge_lim_y =
2113  bedge_lim_uv = filter_level * 2 + inner_limit;
2114  mbedge_lim = bedge_lim_y + 4;
2115  }
2116 
2117  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2118 
    /* Left macroblock edge (skipped in the first column). */
2119  if (mb_x) {
2120  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
2121  mbedge_lim, inner_limit, hev_thresh);
2122  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
2123  mbedge_lim, inner_limit, hev_thresh);
2124  }
2125 
/* Horizontal filtering of the three interior vertical luma edges (columns 4,
 * 8, 12) and the interior chroma edge; gated by `cond` so that VP8 runs it
 * before the vertical pass and VP7 after it. */
2126 #define H_LOOP_FILTER_16Y_INNER(cond) \
2127  if (cond && inner_filter) { \
2128  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2129  bedge_lim_y, inner_limit, \
2130  hev_thresh); \
2131  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2132  bedge_lim_y, inner_limit, \
2133  hev_thresh); \
2134  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2135  bedge_lim_y, inner_limit, \
2136  hev_thresh); \
2137  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2138  uvlinesize, bedge_lim_uv, \
2139  inner_limit, hev_thresh); \
2140  }
2141 
2142  H_LOOP_FILTER_16Y_INNER(!is_vp7)
2143 
    /* Top macroblock edge (skipped in the first row). */
2144  if (mb_y) {
2145  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
2146  mbedge_lim, inner_limit, hev_thresh);
2147  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
2148  mbedge_lim, inner_limit, hev_thresh);
2149  }
2150 
    /* Interior horizontal edges (rows 4, 8, 12 of luma; row 4 of chroma). */
2151  if (inner_filter) {
2152  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
2153  linesize, bedge_lim_y,
2154  inner_limit, hev_thresh);
2155  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
2156  linesize, bedge_lim_y,
2157  inner_limit, hev_thresh);
2158  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
2159  linesize, bedge_lim_y,
2160  inner_limit, hev_thresh);
2161  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
2162  dst[2] + 4 * uvlinesize,
2163  uvlinesize, bedge_lim_uv,
2164  inner_limit, hev_thresh);
2165  }
2166 
2167  H_LOOP_FILTER_16Y_INNER(is_vp7)
2168 }
2169 
/* Apply the "simple" loop filter to one macroblock: luma-only filtering of
 * the macroblock edges and interior 4x4 edges, with no high-edge-variance
 * logic and no chroma filtering.
 * NOTE(review): the signature line with the function name (filter_mb_simple)
 * and its leading parameters was dropped during extraction — confirm against
 * the original source. */
2170 static av_always_inline
2172  int mb_x, int mb_y)
2173 {
2174  int mbedge_lim, bedge_lim;
2175  int filter_level = f->filter_level;
2176  int inner_limit = f->inner_limit;
2177  int inner_filter = f->inner_filter;
2178  ptrdiff_t linesize = s->linesize;
2179 
    /* A zero filter level disables deblocking for this macroblock. */
2180  if (!filter_level)
2181  return;
2182 
2183  bedge_lim = 2 * filter_level + inner_limit;
2184  mbedge_lim = bedge_lim + 4;
2185 
    /* Left macroblock edge, then interior vertical edges at columns 4/8/12. */
2186  if (mb_x)
2187  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2188  if (inner_filter) {
2189  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2190  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2191  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2192  }
2193 
    /* Top macroblock edge, then interior horizontal edges at rows 4/8/12. */
2194  if (mb_y)
2195  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2196  if (inner_filter) {
2197  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2198  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2199  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2200  }
2201 }
2202 
/* Motion vectors may point up to MARGIN (=64) quarter-luma-sample units
 * outside the frame edge. */
2203 #define MARGIN (16 << 2)
/* Decode macroblock modes and motion vectors for the whole frame in a
 * separate pass (used when mb_layout == 1, i.e. frame threading), updating
 * the segmentation map and MV clamping bounds row by row.
 * NOTE(review): the signature line carrying the function name
 * (vp78_decode_mv_mb_modes) and its first parameters, and the line declaring
 * the per-row `mb` pointer inside the loop, were dropped during extraction —
 * confirm against the original source. */
2204 static av_always_inline
2206  VP8Frame *prev_frame, int is_vp7)
2207 {
2208  VP8Context *s = avctx->priv_data;
2209  int mb_x, mb_y;
2210 
2211  s->mv_bounds.mv_min.y = -MARGIN;
2212  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2213  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2215  ((s->mb_width + 1) * (mb_y + 1) + 1);
2216  int mb_xy = mb_y * s->mb_width;
2217 
    /* Reset left intra 4x4 prediction context to DC at each row start. */
2218  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2219 
2220  s->mv_bounds.mv_min.x = -MARGIN;
2221  s->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2222  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
    /* First row: seed the above-macroblock prediction context with DC. */
2223  if (mb_y == 0)
2224  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2225  DC_PRED * 0x01010101);
2226  decode_mb_mode(s, &s->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2227  prev_frame && prev_frame->seg_map ?
2228  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
    /* Shift the clamping window one macroblock (64 quarter-pels) right/down. */
2229  s->mv_bounds.mv_min.x -= 64;
2230  s->mv_bounds.mv_max.x -= 64;
2231  }
2232  s->mv_bounds.mv_min.y -= 64;
2233  s->mv_bounds.mv_max.y -= 64;
2234  }
2235 }
2236 
/* VP7 wrapper for the shared mode/MV decoding pass. */
2237 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2238  VP8Frame *prev_frame)
2239 {
2240  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2241 }
2242 
/* VP8 wrapper for the shared mode/MV decoding pass. */
2243 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2244  VP8Frame *prev_frame)
2245 {
2246  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2247 }
2248 
2249 #if HAVE_THREADS
/* Block the calling slice thread until the other thread's data (otd) reports
 * progress at least up to (mb_x_check, mb_y_check).  A position is packed
 * into a single int as (mb_y << 16) | mb_x so it can be compared with one
 * atomic load; the condition variable in otd is used for the actual wait. */
2250 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2251  do { \
2252  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2253  if (atomic_load(&otd->thread_mb_pos) < tmp) { \
2254  pthread_mutex_lock(&otd->lock); \
2255  atomic_store(&td->wait_mb_pos, tmp); \
2256  do { \
2257  if (atomic_load(&otd->thread_mb_pos) >= tmp) \
2258  break; \
2259  pthread_cond_wait(&otd->cond, &otd->lock); \
2260  } while (1); \
2261  atomic_store(&td->wait_mb_pos, INT_MAX); \
2262  pthread_mutex_unlock(&otd->lock); \
2263  } \
2264  } while (0)
2265 
/* Publish this thread's decoding position (packed as above) and, when slice
 * threading is active, wake neighbouring threads that are blocked in
 * check_thread_pos() waiting for a position we have now reached.
 * Relies on `avctx`, `num_jobs`, `prev_td` and `next_td` being in scope at
 * the expansion site. */
2266 #define update_pos(td, mb_y, mb_x) \
2267  do { \
2268  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2269  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2270  (num_jobs > 1); \
2271  int is_null = !next_td || !prev_td; \
2272  int pos_check = (is_null) ? 1 : \
2273  (next_td != td && pos >= atomic_load(&next_td->wait_mb_pos)) || \
2274  (prev_td != td && pos >= atomic_load(&prev_td->wait_mb_pos)); \
2275  atomic_store(&td->thread_mb_pos, pos); \
2276  if (sliced_threading && pos_check) { \
2277  pthread_mutex_lock(&td->lock); \
2278  pthread_cond_broadcast(&td->cond); \
2279  pthread_mutex_unlock(&td->lock); \
2280  } \
2281  } while (0)
2282 #else
/* Without thread support both macros reduce to no-ops. */
2283 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2284 #define update_pos(td, mb_y, mb_x) while(0)
2285 #endif
2286 
/* Decode one macroblock row (coefficients, intra/inter prediction, IDCT) for
 * the row this thread currently owns, without running the loop filter.
 * Returns 0 on success or AVERROR_INVALIDDATA if the range coder ran out of
 * data.  With slice threading, waits on neighbouring threads so that the
 * required reference rows are available before each macroblock is decoded.
 * NOTE(review): the signature line carrying the function name
 * (decode_mb_row_no_filter) and its first parameters, and one line of the
 * segmentation-map condition below, were dropped during extraction — confirm
 * against the original source. */
2288  int jobnr, int threadnr, int is_vp7)
2289 {
2290  VP8Context *s = avctx->priv_data;
2291  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
2292  int mb_y = atomic_load(&td->thread_mb_pos) >> 16;
2293  int mb_x, mb_xy = mb_y * s->mb_width;
2294  int num_jobs = s->num_jobs;
2295  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
    /* Coefficient partitions are assigned to rows round-robin. */
2296  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2297  VP8Macroblock *mb;
2298  uint8_t *dst[3] = {
2299  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2300  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2301  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2302  };
2303 
2304  if (vpX_rac_is_end(c))
2305  return AVERROR_INVALIDDATA;
2306 
    /* Identify the threads decoding the rows above and below this one. */
2307  if (mb_y == 0)
2308  prev_td = td;
2309  else
2310  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2311  if (mb_y == s->mb_height - 1)
2312  next_td = td;
2313  else
2314  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2315  if (s->mb_layout == 1)
2316  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2317  else {
2318  // Make sure the previous frame has read its segmentation map,
2319  // if we re-use the same map.
2320  if (prev_frame && s->segmentation.enabled &&
2322  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2323  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2324  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2325  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2326  }
2327 
    /* VP7 carries left-nnz context across rows; VP8 resets it per row. */
2328  if (!is_vp7 || mb_y == 0)
2329  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2330 
2331  td->mv_bounds.mv_min.x = -MARGIN;
2332  td->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2333 
2334  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2335  if (vpX_rac_is_end(c))
2336  return AVERROR_INVALIDDATA;
2337  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2338  if (prev_td != td) {
2339  if (threadnr != 0) {
2340  check_thread_pos(td, prev_td,
2341  mb_x + (is_vp7 ? 2 : 1),
2342  mb_y - (is_vp7 ? 2 : 1));
2343  } else {
    /* Thread 0's position counter also covers its filtering pass, which is
     * offset by mb_width + 3 (see filter_mb_row / update_pos usage). */
2344  check_thread_pos(td, prev_td,
2345  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2346  mb_y - (is_vp7 ? 2 : 1));
2347  }
2348  }
2349 
2350  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2351  s->linesize, 4);
2352  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2353  dst[2] - dst[1], 2);
2354 
    /* In single-pass layout, modes/MVs are decoded inline with the row. */
2355  if (!s->mb_layout)
2356  decode_mb_mode(s, &td->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2357  prev_frame && prev_frame->seg_map ?
2358  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2359 
2360  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2361 
2362  if (!mb->skip)
2363  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2364 
2365  if (mb->mode <= MODE_I4x4)
2366  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2367  else
2368  inter_predict(s, td, dst, mb, mb_x, mb_y);
2369 
2370  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2371 
2372  if (!mb->skip) {
2373  idct_mb(s, td, dst, mb);
2374  } else {
2375  AV_ZERO64(td->left_nnz);
2376  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2377 
2378  /* Reset DC block predictors if they would exist
2379  * if the mb had coefficients */
2380  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2381  td->left_nnz[8] = 0;
2382  s->top_nnz[mb_x][8] = 0;
2383  }
2384  }
2385 
2386  if (s->deblock_filter)
2387  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2388 
    /* The last job backs up the row border for the next row's intra
     * prediction before the filter pass can overwrite it. */
2389  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2390  if (s->filter.simple)
2391  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2392  NULL, NULL, s->linesize, 0, 1);
2393  else
2394  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2395  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2396  }
2397 
2398  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2399 
2400  dst[0] += 16;
2401  dst[1] += 8;
2402  dst[2] += 8;
2403  td->mv_bounds.mv_min.x -= 64;
2404  td->mv_bounds.mv_max.x -= 64;
2405 
    /* Report progress; the row-end sentinel skips ahead by the filter
     * pass offset so waiting threads are not woken twice. */
2406  if (mb_x == s->mb_width + 1) {
2407  update_pos(td, mb_y, s->mb_width + 3);
2408  } else {
2409  update_pos(td, mb_y, mb_x);
2410  }
2411  }
2412  return 0;
2413 }
2414 
/* VP7 wrapper for the shared no-filter row decoder. */
2415 static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2416  int jobnr, int threadnr)
2417 {
2418  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2419 }
2420 
/* VP8 wrapper for the shared no-filter row decoder. */
2421 static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2422  int jobnr, int threadnr)
2423 {
2424  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2425 }
2426 
/* Run the loop filter over one macroblock row using the per-macroblock
 * strengths computed during decoding, synchronizing with the threads that
 * own the rows above and below when slice threading is active. */
2427 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
2428  int jobnr, int threadnr, int is_vp7)
2429 {
2430  VP8Context *s = avctx->priv_data;
2431  VP8ThreadData *td = &s->thread_data[threadnr];
2432  int mb_x, mb_y = atomic_load(&td->thread_mb_pos) >> 16, num_jobs = s->num_jobs;
2433  AVFrame *curframe = s->curframe->tf.f;
2434  VP8Macroblock *mb;
2435  VP8ThreadData *prev_td, *next_td;
2436  uint8_t *dst[3] = {
2437  curframe->data[0] + 16 * mb_y * s->linesize,
2438  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2439  curframe->data[2] + 8 * mb_y * s->uvlinesize
2440  };
2441 
2442  if (s->mb_layout == 1)
2443  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2444  else
2445  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2446 
    /* Identify the threads decoding the rows above and below this one. */
2447  if (mb_y == 0)
2448  prev_td = td;
2449  else
2450  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2451  if (mb_y == s->mb_height - 1)
2452  next_td = td;
2453  else
2454  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2455 
2456  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2457  VP8FilterStrength *f = &td->filter_strength[mb_x];
    /* The row above must be past its filter pass (offset mb_width + 3),
     * and the row below must have decoded far enough ahead. */
2458  if (prev_td != td)
2459  check_thread_pos(td, prev_td,
2460  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2461  if (next_td != td)
2462  if (next_td != &s->thread_data[0])
2463  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2464 
    /* Single-job mode: the border backup happens here instead of in the
     * decode pass (compare decode_mb_row_no_filter). */
2465  if (num_jobs == 1) {
2466  if (s->filter.simple)
2467  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2468  NULL, NULL, s->linesize, 0, 1);
2469  else
2470  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2471  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2472  }
2473 
2474  if (s->filter.simple)
2475  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2476  else
2477  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2478  dst[0] += 16;
2479  dst[1] += 8;
2480  dst[2] += 8;
2481 
    /* Filter progress is reported offset by mb_width + 3 so it is
     * distinguishable from decode progress on the same row. */
2482  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2483  }
2484 }
2485 
/* VP7 wrapper for the shared filter-row pass. */
2486 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2487  int jobnr, int threadnr)
2488 {
2489  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2490 }
2491 
/* VP8 wrapper for the shared filter-row pass. */
2492 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2493  int jobnr, int threadnr)
2494 {
2495  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2496 }
2497 
/* Per-job entry point for sliced decoding: each job decodes (and optionally
 * filters) every num_jobs-th macroblock row, interleaved with the other
 * jobs.  Returns 0 on success or a negative error from the row decoder. */
2498 static av_always_inline
2499 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2500  int threadnr, int is_vp7)
2501 {
2502  VP8Context *s = avctx->priv_data;
2503  VP8ThreadData *td = &s->thread_data[jobnr];
2504  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2505  VP8Frame *curframe = s->curframe;
2506  int mb_y, num_jobs = s->num_jobs;
2507  int ret;
2508 
2509  td->thread_nr = threadnr;
    /* Start the MV clamp window at this job's first row; it is advanced by
     * 64 * num_jobs per iteration below. */
2510  td->mv_bounds.mv_min.y = -MARGIN - 64 * threadnr;
2511  td->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN - 64 * threadnr;
2512  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2513  atomic_store(&td->thread_mb_pos, mb_y << 16);
2514  ret = s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2515  if (ret < 0) {
    /* Unblock any waiting threads before propagating the error. */
2516  update_pos(td, s->mb_height, INT_MAX & 0xFFFF);
2517  return ret;
2518  }
2519  if (s->deblock_filter)
2520  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
2521  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2522 
2523  td->mv_bounds.mv_min.y -= 64 * num_jobs;
2524  td->mv_bounds.mv_max.y -= 64 * num_jobs;
2525 
2526  if (avctx->active_thread_type == FF_THREAD_FRAME)
2527  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2528  }
2529 
2530  return 0;
2531 }
2532 
/* VP7 wrapper for the shared sliced row decoder. */
2533 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2534  int jobnr, int threadnr)
2535 {
2536  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2537 }
2538 
/* VP8 wrapper for the shared sliced row decoder. */
2539 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2540  int jobnr, int threadnr)
2541 {
2542  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2543 }
2544 
2545 
/* Decode one VP7/VP8 frame: parse the frame header, manage the reference
 * frame set (previous/golden/altref), run the threaded macroblock decode and
 * loop filter, and output the frame unless it is invisible or skipped.
 * Returns the consumed packet size on success or a negative AVERROR.
 * NOTE(review): several lines of this function were dropped during
 * extraction (an assertion after the declarations, part of the `referenced`
 * expression, the AV_PICTURE_TYPE_P branch, the golden/altref update
 * assignments, and the execute2 calls) — confirm against the original
 * source before editing. */
2546 static av_always_inline
2547 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2548  AVPacket *avpkt, int is_vp7)
2549 {
2550  VP8Context *s = avctx->priv_data;
2551  int ret, i, referenced, num_jobs;
2552  enum AVDiscard skip_thresh;
2553  VP8Frame *av_uninit(curframe), *prev_frame;
2556 
2557  if (is_vp7)
2558  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2559  else
2560  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2561 
2562  if (ret < 0)
2563  goto err;
2564 
2565  prev_frame = s->framep[VP56_FRAME_CURRENT];
2566 
    /* A frame is "referenced" when any of last/golden/altref will be
     * updated from it; unreferenced frames can be discarded cheaply. */
2567  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2569 
2570  skip_thresh = !referenced ? AVDISCARD_NONREF
2571  : !s->keyframe ? AVDISCARD_NONKEY
2572  : AVDISCARD_ALL;
2573 
2574  if (avctx->skip_frame >= skip_thresh) {
2575  s->invisible = 1;
2576  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2577  goto skip_decode;
2578  }
2579  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2580 
2581  // release no longer referenced frames
2582  for (i = 0; i < 5; i++)
2583  if (s->frames[i].tf.f->data[0] &&
2584  &s->frames[i] != prev_frame &&
2585  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2586  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2587  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2588  vp8_release_frame(s, &s->frames[i]);
2589 
2590  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2591 
2592  if (!s->colorspace)
2593  avctx->colorspace = AVCOL_SPC_BT470BG;
2594  if (s->fullrange)
2595  avctx->color_range = AVCOL_RANGE_JPEG;
2596  else
2597  avctx->color_range = AVCOL_RANGE_MPEG;
2598 
2599  /* Given that arithmetic probabilities are updated every frame, it's quite
2600  * likely that the values we have on a random interframe are complete
2601  * junk if we didn't start decode on a keyframe. So just don't display
2602  * anything rather than junk. */
2603  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2604  !s->framep[VP56_FRAME_GOLDEN] ||
2605  !s->framep[VP56_FRAME_GOLDEN2])) {
2606  av_log(avctx, AV_LOG_WARNING,
2607  "Discarding interframe without a prior keyframe!\n");
2608  ret = AVERROR_INVALIDDATA;
2609  goto err;
2610  }
2611 
2612  curframe->tf.f->key_frame = s->keyframe;
2613  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2615  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
2616  goto err;
2617 
2618  // check if golden and altref are swapped
2619  if (s->update_altref != VP56_FRAME_NONE)
2621  else
2623 
2624  if (s->update_golden != VP56_FRAME_NONE)
2626  else
2628 
2629  if (s->update_last)
2630  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2631  else
2633 
2634  s->next_framep[VP56_FRAME_CURRENT] = curframe;
2635 
    /* With frame threading, the next frame may start once the reference
     * bookkeeping above is complete. */
2636  if (avctx->codec->update_thread_context)
2637  ff_thread_finish_setup(avctx);
2638 
2639  s->linesize = curframe->tf.f->linesize[0];
2640  s->uvlinesize = curframe->tf.f->linesize[1];
2641 
2642  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2643  /* Zero macroblock structures for top/top-left prediction
2644  * from outside the frame. */
2645  if (!s->mb_layout)
2646  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2647  (s->mb_width + 1) * sizeof(*s->macroblocks));
2648  if (!s->mb_layout && s->keyframe)
2649  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2650 
2651  memset(s->ref_count, 0, sizeof(s->ref_count));
2652 
    /* Frame-threading layout: decode all modes/MVs up front. */
2653  if (s->mb_layout == 1) {
2654  // Make sure the previous frame has read its segmentation map,
2655  // if we re-use the same map.
2656  if (prev_frame && s->segmentation.enabled &&
2658  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2659  if (is_vp7)
2660  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2661  else
2662  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2663  }
2664 
2665  if (avctx->active_thread_type == FF_THREAD_FRAME)
2666  num_jobs = 1;
2667  else
2668  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
2669  s->num_jobs = num_jobs;
2670  s->curframe = curframe;
2671  s->prev_frame = prev_frame;
2672  s->mv_bounds.mv_min.y = -MARGIN;
2673  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2674  for (i = 0; i < MAX_THREADS; i++) {
2675  VP8ThreadData *td = &s->thread_data[i];
2676  atomic_init(&td->thread_mb_pos, 0);
2677  atomic_init(&td->wait_mb_pos, INT_MAX);
2678  }
2679  if (is_vp7)
2681  num_jobs);
2682  else
2684  num_jobs);
2685 
2686  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2687  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2688 
2689 skip_decode:
2690  // if future frames don't use the updated probabilities,
2691  // reset them to the values we saved
2692  if (!s->update_probabilities)
2693  s->prob[0] = s->prob[1];
2694 
2695  if (!s->invisible) {
2696  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2697  return ret;
2698  *got_frame = 1;
2699  }
2700 
2701  return avpkt->size;
2702 err:
2703  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2704  return ret;
2705 }
2706 
/* Public VP8 decode entry point (also used by the WebP decoder). */
2707 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2708  AVPacket *avpkt)
2709 {
2710  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2711 }
2712 
2713 #if CONFIG_VP7_DECODER
/* VP7 decode entry point. */
2714 static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2715  AVPacket *avpkt)
2716 {
2717  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
2718 }
2719 #endif /* CONFIG_VP7_DECODER */
2720 
2722 {
2723  VP8Context *s = avctx->priv_data;
2724  int i;
2725 
2726  if (!s)
2727  return 0;
2728 
2729  vp8_decode_flush_impl(avctx, 1);
2730  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2731  av_frame_free(&s->frames[i].tf.f);
2732 
2733  return 0;
2734 }
2735 
2737 {
2738  int i;
2739  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2740  s->frames[i].tf.f = av_frame_alloc();
2741  if (!s->frames[i].tf.f)
2742  return AVERROR(ENOMEM);
2743  }
2744  return 0;
2745 }
2746 
/* Shared VP7/VP8 decoder init: set up DSP contexts, the coefficient scan
 * order and the frame pool.  Returns 0 or a negative AVERROR.
 * NOTE(review): the lines inside the two CONFIG_* branches that assign the
 * codec-specific function pointers were partially dropped during extraction
 * — confirm against the original source. */
2747 static av_always_inline
2748 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2749 {
2750  VP8Context *s = avctx->priv_data;
2751  int ret;
2752 
2753  s->avctx = avctx;
2754  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
2755  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2756  avctx->internal->allocate_progress = 1;
2757 
2758  ff_videodsp_init(&s->vdsp, 8);
2759 
2760  ff_vp78dsp_init(&s->vp8dsp);
2761  if (CONFIG_VP7_DECODER && is_vp7) {
2763  ff_vp7dsp_init(&s->vp8dsp);
2766  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2768  ff_vp8dsp_init(&s->vp8dsp);
2771  }
2772 
2773  /* does not change for VP8 */
2774  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
2775 
2776  if ((ret = vp8_init_frames(s)) < 0) {
2777  ff_vp8_decode_free(avctx);
2778  return ret;
2779  }
2780 
2781  return 0;
2782 }
2783 
2784 #if CONFIG_VP7_DECODER
/* VP7 init wrapper. */
2785 static int vp7_decode_init(AVCodecContext *avctx)
2786 {
2787  return vp78_decode_init(avctx, IS_VP7);
2788 }
2789 #endif /* CONFIG_VP7_DECODER */
2790 
2792 {
2793  return vp78_decode_init(avctx, IS_VP8);
2794 }
2795 
2796 #if CONFIG_VP8_DECODER
2797 #if HAVE_THREADS
/* Frame-threading copy init: each worker thread only needs its own frame
 * pool; all other state is copied in update_thread_context(). */
2798 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2799 {
2800  VP8Context *s = avctx->priv_data;
2801  int ret;
2802 
2803  s->avctx = avctx;
2804 
2805  if ((ret = vp8_init_frames(s)) < 0) {
2806  ff_vp8_decode_free(avctx);
2807  return ret;
2808  }
2809 
2810  return 0;
2811 }
2812 
/* Translate a frame pointer from the source context's frame pool into the
 * corresponding slot of this context's pool (NULL stays NULL). */
2813 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2814 
/* Frame-threading state transfer: copy probabilities, segmentation, loop
 * filter deltas and sign biases from the source thread, re-reference its
 * frames and rebase the reference-frame pointers into this context's pool.
 * Returns 0 or a negative error from vp8_ref_frame(). */
2815 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2816  const AVCodecContext *src)
2817 {
2818  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2819  int i;
2820 
    /* On a resolution change, drop the old per-dimension buffers; they are
     * reallocated lazily at the new size. */
2821  if (s->macroblocks_base &&
2822  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2823  free_buffers(s);
2824  s->mb_width = s_src->mb_width;
2825  s->mb_height = s_src->mb_height;
2826  }
2827 
2828  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2829  s->segmentation = s_src->segmentation;
2830  s->lf_delta = s_src->lf_delta;
2831  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2832 
2833  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2834  if (s_src->frames[i].tf.f->data[0]) {
2835  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2836  if (ret < 0)
2837  return ret;
2838  }
2839  }
2840 
2841  s->framep[0] = REBASE(s_src->next_framep[0]);
2842  s->framep[1] = REBASE(s_src->next_framep[1]);
2843  s->framep[2] = REBASE(s_src->next_framep[2]);
2844  s->framep[3] = REBASE(s_src->next_framep[3]);
2845 
2846  return 0;
2847 }
2848 #endif /* HAVE_THREADS */
2849 #endif /* CONFIG_VP8_DECODER */
2850 
2851 #if CONFIG_VP7_DECODER
/* VP7 decoder registration.
 * NOTE(review): one initializer line before the closing brace was dropped
 * during extraction — confirm against the original source. */
2852 AVCodec ff_vp7_decoder = {
2853  .name = "vp7",
2854  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2855  .type = AVMEDIA_TYPE_VIDEO,
2856  .id = AV_CODEC_ID_VP7,
2857  .priv_data_size = sizeof(VP8Context),
2858  .init = vp7_decode_init,
2859  .close = ff_vp8_decode_free,
2860  .decode = vp7_decode_frame,
2861  .capabilities = AV_CODEC_CAP_DR1,
2863 };
2864 #endif /* CONFIG_VP7_DECODER */
2865 
2866 #if CONFIG_VP8_DECODER
/* VP8 decoder registration.
 * NOTE(review): the .init, .decode and part of the .capabilities/.flush
 * initializer lines were dropped during extraction — confirm against the
 * original source. */
2867 AVCodec ff_vp8_decoder = {
2868  .name = "vp8",
2869  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2870  .type = AVMEDIA_TYPE_VIDEO,
2871  .id = AV_CODEC_ID_VP8,
2872  .priv_data_size = sizeof(VP8Context),
2874  .close = ff_vp8_decode_free,
2876  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
2879  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2880  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2881 };
2882 #endif /* CONFIG_VP8_DECODER */
uint8_t golden
Definition: vp8.h:242
atomic_int wait_mb_pos
Definition: vp8.h:130
uint8_t inner_limit
Definition: vp8.h:77
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:185
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:711
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1598
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:778
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1770
discard all frames except keyframes
Definition: avcodec.h:829
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
Definition: vp9.h:47
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:744
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:106
static void copy(const float *p1, float *p2, const int length)
(only used in prediction) no split MVs
Definition: vp8.h:72
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
int size
static float alpha(float a)
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:236
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
#define atomic_store(object, desired)
Definition: stdatomic.h:85
static void flush(AVCodecContext *avctx)
static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2421
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:163
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1963
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1323
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:392
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:391
uint8_t feature_value[4][4]
Definition: vp8.h:308
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:211
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
ptrdiff_t linesize
Definition: vp8.h:154
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:494
#define avpriv_request_sample(...)
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:187
uint8_t mbskip_enabled
Definition: vp8.h:159
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:352
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2498
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS - 1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1357
uint8_t scan[16]
Definition: vp8.h:247
int size
Definition: avcodec.h:1680
const char * b
Definition: vf_curves.c:113
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:120
#define MARGIN
Definition: vp8.c:2203
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
VP56mv bmv[16]
Definition: vp8.h:93
#define AV_RL16
Definition: intreadwrite.h:42
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:666
uint8_t inner_filter
Definition: vp8.h:78
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
uint8_t segmentid[3]
Definition: vp8.h:238
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:803
discard all
Definition: avcodec.h:830
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
struct VP8Context::@142 filter
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:427
#define src
Definition: vp8dsp.c:254
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3739
#define CONFIG_VP7_DECODER
Definition: config.h:919
uint8_t sharpness
Definition: vp8.h:182
#define AV_WN32A(p, v)
Definition: intreadwrite.h:543
2 16x8 blocks (vertical)
Definition: vp8.h:68
#define AV_COPY32(d, s)
Definition: intreadwrite.h:591
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:138
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:260
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
VP8intmv mv_min
Definition: vp8.h:102
VP8Frame * framep[4]
Definition: vp8.h:147
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2486
#define VP7_MVC_SIZE
Definition: vp8.c:393
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:816
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: 4-log2(width) second dimension: 0 if no vertical interpolation is needed; 1 4-tap ve...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:827
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
uint8_t(* top_nnz)[9]
Definition: vp8.h:227
int num_jobs
Definition: vp8.h:277
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3386
#define AV_RN32A(p)
Definition: intreadwrite.h:531
static int16_t block[64]
Definition: dct.c:115
uint8_t pred16x16[4]
Definition: vp8.h:243
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:174
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:153
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int16_t y
Definition: vp56.h:68
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:253
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2427
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:91
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:106
static av_always_inline void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:777
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:204
#define av_cold
Definition: attributes.h:82
ptrdiff_t uvlinesize
Definition: vp8.h:155
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:293
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:945
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:125
uint8_t ref_frame
Definition: vp8.h:86
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1562
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
Multithreading support functions.
Definition: vp9.h:46
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2707
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:395
uint8_t mvc[2][19]
Definition: vp8.h:246
struct VP8Context::@143 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
VP56mv mv
Definition: vp8.h:92
int8_t base_quant[4]
Definition: vp8.h:175
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:733
static AVFrame * frame
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:252
const char data[16]
Definition: mxf.c:90
#define height
uint8_t * data
Definition: avcodec.h:1679
int8_t yoffset
Definition: vp8data.h:62
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:2127
static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
vp5689 returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vp56.h:234
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:215
VP8Frame * prev_frame
Definition: vp8.h:150
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:266
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:271
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
uint8_t token[4][16][3][NUM_DCT_TOKENS - 1]
Definition: vp8.h:245
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
#define AV_COPY64(d, s)
Definition: intreadwrite.h:595
uint8_t feature_index_prob[4][3]
Definition: vp8.h:307
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:90
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2547
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:188
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:274
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:939
enum AVCodecID id
Definition: avcodec.h:3753
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:270
Definition: vp8.h:138
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1882
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:173
uint16_t mb_width
Definition: vp8.h:152
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:755
#define atomic_load(object)
Definition: stdatomic.h:93
#define FF_SIGNBIT(x)
Definition: internal.h:92
uint8_t last
Definition: vp8.h:241
static const int sizes[][2]
Definition: img2dec.c:51
#define AVERROR(e)
Definition: error.h:43
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:647
static void fade(uint8_t *dst, ptrdiff_t dst_linesize, const uint8_t *src, ptrdiff_t src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:438
uint8_t mode
Definition: vp8.h:85
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1527
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2539
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:181
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3211
const char * r
Definition: vf_curves.c:111
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
VP8 compatible video decoder.
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:146
#define CONFIG_VP8_DECODER
Definition: config.h:920
uint16_t width
Definition: gdv.c:47
VP8mvbounds mv_bounds
Definition: vp8.h:161
#define EDGE_EMU_LINESIZE
Definition: vp8.h:132
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:300
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
VideoDSPContext vdsp
Definition: vp8.h:268
const char * name
Name of the codec implementation.
Definition: avcodec.h:3746
VP8Macroblock * macroblocks_base
Definition: vp8.h:250
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1844
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS - 1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1263
static av_always_inline void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1171
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:133
int16_t block[6][4][16]
Definition: vp8.h:107
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2492
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:287
#define FFMAX(a, b)
Definition: common.h:94
uint8_t keyframe
Definition: vp8.h:157
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1065
int x
Definition: vp8.h:97
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:220
VP56Frame
Definition: vp56.h:40
int16_t luma_qmul[2]
Definition: vp8.h:197
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:67
useful rectangle filling function
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
#define MAX_THREADS
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
4x4 blocks of 4x4px each
Definition: vp8.h:71
uint8_t deblock_filter
Definition: vp8.h:158
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3203
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:96
uint8_t feature_present_prob[4]
Definition: vp8.h:306
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1794
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:275
int16_t block_dc[16]
Definition: vp8.h:108
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:347
int width
picture width / height.
Definition: avcodec.h:1948
uint8_t mbskip
Definition: vp8.h:239
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:223
static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2415
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2736
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:49
int32_t
AVFormatContext * ctx
Definition: movenc.c:48
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2283
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:98
struct VP8Context::@141 segmentation
Base parameters for segmentation, i.e.
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:821
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3204
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:198
int16_t chroma_qmul[2]
Definition: vp8.h:199
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
#define AV_RL32
Definition: intreadwrite.h:146
VP8mvbounds mv_bounds
Definition: vp8.h:135
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:226
int n
Definition: avisynth_c.h:684
ThreadFrame tf
Definition: vp8.h:139
static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2287
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2051
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:771
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:766
#define vp56_rac_get_prob
Definition: vp56.h:265
if(ret< 0)
Definition: vf_mcdeint.c:279
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:107
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1397
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2243
#define cat(a, bpp, b)
Definition: vp9dsp_init.h:29
uint8_t segment
Definition: vp8.h:89
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3192
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:514
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2533
#define IS_VP8
Definition: vp8dsp.h:104
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1069
static const int8_t mv[256][2]
Definition: 4xm.c:77
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2237
int(* update_thread_context)(AVCodecContext *dst, const AVCodecContext *src)
Copy necessary context variables from a previous thread context to the current one.
Definition: avcodec.h:3794
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1536
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:282
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:3252
#define src1
Definition: h264pred.c:139
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1905
VP8Frame * curframe
Definition: vp8.h:149
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS - 1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1382
uint8_t simple
Definition: vp8.h:180
VP8Frame frames[5]
Definition: vp8.h:272
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
uint8_t level
Definition: vp8.h:181
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:82
AVBufferRef * seg_map
Definition: vp8.h:140
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS - 1]
Definition: vp8data.h:540
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:790
main external API structure.
Definition: avcodec.h:1761
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS - 1]
Definition: vp8data.h:369
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:452
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:148
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:283
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:123
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:229
void * buf
Definition: avisynth_c.h:690
int y
Definition: vp8.h:98
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:267
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
int vp7
Definition: vp8.h:288
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:260
int coded_height
Definition: avcodec.h:1963
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:209
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS - 1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1345
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:134
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2491
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:286
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1518
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:377
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:172
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:1989
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:396
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:788
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1712
static void update_refs(VP8Context *s)
Definition: vp8.c:416
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:404
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
#define u(width,...)
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:153
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:722
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
mfxU16 profile
Definition: qsvenc.c:44
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1136
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:335
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
#define DC_127_PRED8x8
Definition: h264pred.h:85
Definition: vp56.h:66
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2791
#define AV_RL24
Definition: intreadwrite.h:78
int update_altref
Definition: vp8.h:254
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:305
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:214
2 8x16 blocks (horizontal)
Definition: vp8.h:69
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2721
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:627
uint8_t pred8x8c[3]
Definition: vp8.h:244
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:513
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
discard all non reference
Definition: avcodec.h:826
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2205
uint8_t partitioning
Definition: vp8.h:87
#define AV_ZERO64(d)
Definition: intreadwrite.h:623
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
int16_t x
Definition: vp56.h:67
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:306
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:113
#define AV_COPY128(d, s)
Definition: intreadwrite.h:599
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
Definition: vp3.c:1927
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:69
uint8_t chroma_pred_mode
Definition: vp8.h:88
static double c[64]
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
VP8intmv mv_max
Definition: vp8.h:103
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
Definition: vp8.c:1472
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:127
struct VP8Context::@145 prob[2]
These are all of the updatable probabilities for binary decisions.
#define DC_129_PRED8x8
Definition: h264pred.h:86
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:3372
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:319
int invisible
Definition: vp8.h:251
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:844
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:164
void * priv_data
Definition: avcodec.h:1803
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1552
struct VP8Context::@144 lf_delta
#define MODE_I4x4
Definition: vp8.h:59
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:926
#define XCHG(a, b, xchg)
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2284
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1811
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:269
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:150
int thread_nr
Definition: vp8.h:124
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1484
#define AV_ZERO32(d)
Definition: intreadwrite.h:619
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2499
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
uint64_t layout
AVDiscard
Definition: avcodec.h:821
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:369
#define av_uninit(x)
Definition: attributes.h:148
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1736
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2084
#define atomic_init(obj, value)
Definition: stdatomic.h:33
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:103
#define av_always_inline
Definition: attributes.h:39
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:176
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:99
uint8_t intra
Definition: vp8.h:240
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1036
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:116
uint8_t skip
Definition: vp8.h:82
atomic_int thread_mb_pos
Definition: vp8.h:129
void ff_vp8dsp_init(VP8DSPContext *c)
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:368
This structure stores compressed data.
Definition: avcodec.h:1656
#define VP8_MVC_SIZE
Definition: vp8.c:394
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:491
uint8_t profile
Definition: vp8.h:160
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1397
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:1002
VP8ThreadData * thread_data
Definition: vp8.h:145
Predicted.
Definition: avutil.h:275
2x2 blocks of 8x8px each
Definition: vp8.h:70
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2171
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:816
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2748
int(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:285
#define AV_WN64(p, v)
Definition: intreadwrite.h:385
uint8_t filter_level
Definition: vp8.h:76