FFmpeg  3.4.9
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
42 #include "avcodec.h"
43 #include "dct.h"
44 #include "idctdsp.h"
45 #include "mpeg12.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
48 #include "h261.h"
49 #include "h263.h"
50 #include "h263data.h"
51 #include "mjpegenc_common.h"
52 #include "mathops.h"
53 #include "mpegutils.h"
54 #include "mjpegenc.h"
55 #include "msmpeg4.h"
56 #include "pixblockdsp.h"
57 #include "qpeldsp.h"
58 #include "faandct.h"
59 #include "thread.h"
60 #include "aandcttab.h"
61 #include "flv.h"
62 #include "mpeg4video.h"
63 #include "internal.h"
64 #include "bytestream.h"
65 #include "wmv2.h"
66 #include "rv10.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
/* Fixed-point precision (in bits) of the intra/inter quantizer bias values. */
71 #define QUANT_BIAS_SHIFT 8
72 
/* Fixed-point shift of the precomputed quantizer multiplier tables:
 * QMAT_SHIFT_MMX for the 16-bit (SIMD) table, QMAT_SHIFT for the 32-bit one. */
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
/* Forward declarations for quantizers defined later in this file.
 * NOTE(review): original line 76 is elided in this extraction. */
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
84 
87  { NULL },
88 };
89 
/**
 * Precompute the per-qscale quantization multiplier tables.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale] with 32-bit
 * fixed-point multipliers (QMAT_SHIFT precision) and, on the generic FDCT
 * path, also qmat16[qscale][0] (16-bit multipliers, QMAT_SHIFT_MMX
 * precision) and qmat16[qscale][1] (rounding bias derived from 'bias').
 * The scaling depends on which FDCT implementation is selected, because
 * ff_fdct_ifast leaves the AAN scale factors in its output.  Finally it
 * checks whether max_coeff * multiplier can overflow INT_MAX and warns.
 *
 * NOTE(review): this listing is an extraction with elided lines; original
 * lines 103 (the branch selecting qscale2 for the non-linear qscale case)
 * and 168 (the av_log call head) are missing from this dump.
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91  uint16_t (*qmat16)[2][64],
92  const uint16_t *quant_matrix,
93  int bias, int qmin, int qmax, int intra)
94 {
95  FDCTDSPContext *fdsp = &s->fdsp;
96  int qscale;
97  int shift = 0;
98 
99  for (qscale = qmin; qscale <= qmax; qscale++) {
100  int i;
101  int qscale2;
102 
 /* NOTE(review): the matching 'if' arm (original line 103) is elided;
  * this 'else' doubles qscale for the linear qscale case. */
104  else qscale2 = qscale << 1;
105 
 /* Accurate integer FDCTs: output is an unscaled DCT. */
106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 #if CONFIG_FAANDCT
108  fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110  fdsp->fdct == ff_jpeg_fdct_islow_10) {
111  for (i = 0; i < 64; i++) {
 /* Tables are built in the IDCT's permuted coefficient order. */
112  const int j = s->idsp.idct_permutation[i];
113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
114  /* 16 <= qscale * quant_matrix[i] <= 7905
115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116  * 19952 <= x <= 249205026
117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118  * 3444240 >= (1 << 36) / (x) >= 275 */
119 
120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121  }
 /* ifast FDCT: fold the AAN post-scale factors into the multiplier. */
122  } else if (fdsp->fdct == ff_fdct_ifast) {
123  for (i = 0; i < 64; i++) {
124  const int j = s->idsp.idct_permutation[i];
125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126  /* 16 <= qscale * quant_matrix[i] <= 7905
127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128  * 19952 <= x <= 249205026
129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130  * 3444240 >= (1 << 36) / (x) >= 275 */
131 
132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
133  }
 /* Generic path: also build the 16-bit multiplier + bias tables. */
134  } else {
135  for (i = 0; i < 64; i++) {
136  const int j = s->idsp.idct_permutation[i];
137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139  * Assume x = qscale * quant_matrix[i]
140  * So 16 <= x <= 7905
141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142  * so 32768 >= (1 << 19) / (x) >= 67 */
143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145  // (qscale * quant_matrix[i]);
146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
147 
 /* Keep the 16-bit multiplier strictly inside (0, 0x8000). */
148  if (qmat16[qscale][0][i] == 0 ||
149  qmat16[qscale][0][i] == 128 * 256)
150  qmat16[qscale][0][i] = 128 * 256 - 1;
151  qmat16[qscale][1][i] =
152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153  qmat16[qscale][0][i]);
154  }
155  }
156 
 /* Overflow check: find the shift needed so that the largest possible
  * coefficient times the multiplier still fits into an int. */
157  for (i = intra; i < 64; i++) {
158  int64_t max = 8191;
159  if (fdsp->fdct == ff_fdct_ifast) {
160  max = (8191LL * ff_aanscales[i]) >> 14;
161  }
162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
163  shift++;
164  }
165  }
166  }
167  if (shift) {
 /* NOTE(review): the av_log(...) call head (original line 168) is elided. */
169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
170  QMAT_SHIFT - shift);
171  }
172 }
173 
/*
 * Derive the integer quantizer s->qscale (and the squared lambda s->lambda2)
 * from the current rate-control lambda s->lambda.
 *
 * NOTE(review): the first branch is deliberately disabled by the "&& 0" —
 * the non-linear MPEG-2 qscale search below it is dead code.  Original
 * lines 184 (the qmax half of the 'continue' condition) and 199 (the shift
 * amount ending the lambda2 expression) are elided in this extraction.
 */
174 static inline void update_qscale(MpegEncContext *s)
175 {
176  if (s->q_scale_type == 1 && 0) {
177  int i;
178  int bestdiff=INT_MAX;
179  int best = 1;
180 
 /* Pick the non-linear table entry whose implied lambda is closest. */
181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
185  continue;
186  if (diff < bestdiff) {
187  bestdiff = diff;
188  best = i;
189  }
190  }
191  s->qscale = best;
192  } else {
 /* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7),
  * with rounding via the + FF_LAMBDA_SCALE * 64 term. */
193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194  (FF_LAMBDA_SHIFT + 7);
 /* If rate control asked to ignore qmax (VBV emergency), allow the
  * full range up to the hardest quantizer, 31. */
195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
196  }
197 
198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
200 }
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
/* NOTE(review): the signature line (original 218) is elided in this
 * extraction; in upstream FFmpeg this function is
 * ff_init_qscale_tab(MpegEncContext *s) — confirm against full source. */
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
 /* Same lambda -> qscale mapping as update_qscale():
  * qp ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7), rounded, then clipped
  * to the user's [qmin, qmax]; done per macroblock in raster order. */
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
/* Copy the per-frame coding decisions from one encoder context to another
 * (used to keep slice/thread duplicate contexts in sync).
 * NOTE(review): the signature line (original 232) and copy lines 236 and
 * 242-243 are elided in this extraction; upstream this is
 * update_duplicate_context_after_me(MpegEncContext *dst,
 * MpegEncContext *src) — confirm against full source. */
233 {
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
/* NOTE(review): the signature line (original 254) and lines 257 and
 * 262-263 are elided in this extraction; upstream this is
 * static void mpv_encode_defaults(MpegEncContext *s) — confirm. */
255 {
256  int i;
258 
 /* Mark the default f_code table as valid for MVs in [-16, 16). */
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
264 
 /* Reset the frame counters used for display ordering and GOP handling. */
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
/* Select the DCT quantizer / denoiser implementations for this instance
 * (x86 SIMD when available, trellis quantizer when requested, C fallbacks
 * otherwise).
 * NOTE(review): the signature line (original 269) and all of the actual
 * assignment lines (272, 274-275, 277, 279-280, 282) are elided in this
 * extraction — only the fallback conditions are visible here. */
270 {
271  if (ARCH_X86)
273 
276  if (!s->dct_quantize)
278  if (!s->denoise_dct)
281  if (s->avctx->trellis)
283 
284  return 0;
285 }
286 
287 /* init video encoder */
289 {
290  MpegEncContext *s = avctx->priv_data;
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
295 
296  switch (avctx->codec_id) {
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300  av_log(avctx, AV_LOG_ERROR,
301  "only YUV420 and YUV422 are supported\n");
302  return -1;
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
309  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312  (avctx->color_range == AVCOL_RANGE_JPEG &&
313  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316  format_supported = 1;
317  /* MPEG color space */
318  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return -1;
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return -1;
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
349  break;
350  }
351 
352  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
358  if (avctx->me_penalty_compensation)
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
370  av_log(avctx, AV_LOG_WARNING,
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
377  if (avctx->max_b_frames > MAX_B_FRAMES) {
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
380  avctx->max_b_frames = MAX_B_FRAMES;
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
394 
395  if (s->intra_dc_precision < 0) {
396  av_log(avctx, AV_LOG_ERROR,
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
402  if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418 #if FF_API_MOTION_EST
420  s->me_method = avctx->me_method;
422 #endif
423 
424  /* Fixed QSCALE */
425  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
426 
427 #if FF_API_MPV_OPT
429  if (avctx->border_masking != 0.0)
430  s->border_masking = avctx->border_masking;
432 #endif
433 
434  s->adaptive_quant = (s->avctx->lumi_masking ||
435  s->avctx->dark_masking ||
438  s->avctx->p_masking ||
439  s->border_masking ||
440  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
441  !s->fixed_qscale;
442 
444 
445  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
446  switch(avctx->codec_id) {
449  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
450  break;
451  case AV_CODEC_ID_MPEG4:
455  if (avctx->rc_max_rate >= 15000000) {
456  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
457  } else if(avctx->rc_max_rate >= 2000000) {
458  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
459  } else if(avctx->rc_max_rate >= 384000) {
460  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
461  } else
462  avctx->rc_buffer_size = 40;
463  avctx->rc_buffer_size *= 16384;
464  break;
465  }
466  if (avctx->rc_buffer_size) {
467  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
468  }
469  }
470 
471  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
472  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
473  return -1;
474  }
475 
476  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
477  av_log(avctx, AV_LOG_INFO,
478  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
479  }
480 
481  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
482  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
483  return -1;
484  }
485 
486  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
487  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
488  return -1;
489  }
490 
491  if (avctx->rc_max_rate &&
492  avctx->rc_max_rate == avctx->bit_rate &&
493  avctx->rc_max_rate != avctx->rc_min_rate) {
494  av_log(avctx, AV_LOG_INFO,
495  "impossible bitrate constraints, this will fail\n");
496  }
497 
498  if (avctx->rc_buffer_size &&
499  avctx->bit_rate * (int64_t)avctx->time_base.num >
500  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
501  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
502  return -1;
503  }
504 
505  if (!s->fixed_qscale &&
506  avctx->bit_rate * av_q2d(avctx->time_base) >
507  avctx->bit_rate_tolerance) {
508  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
509  av_log(avctx, AV_LOG_WARNING,
510  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
511  if (nbt <= INT_MAX) {
512  avctx->bit_rate_tolerance = nbt;
513  } else
514  avctx->bit_rate_tolerance = INT_MAX;
515  }
516 
517  if (s->avctx->rc_max_rate &&
518  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
521  90000LL * (avctx->rc_buffer_size - 1) >
522  s->avctx->rc_max_rate * 0xFFFFLL) {
523  av_log(avctx, AV_LOG_INFO,
524  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
525  "specified vbv buffer is too large for the given bitrate!\n");
526  }
527 
528  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
530  s->codec_id != AV_CODEC_ID_FLV1) {
531  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
532  return -1;
533  }
534 
535  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
536  av_log(avctx, AV_LOG_ERROR,
537  "OBMC is only supported with simple mb decision\n");
538  return -1;
539  }
540 
541  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
542  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
543  return -1;
544  }
545 
546  if (s->max_b_frames &&
547  s->codec_id != AV_CODEC_ID_MPEG4 &&
550  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
551  return -1;
552  }
553  if (s->max_b_frames < 0) {
554  av_log(avctx, AV_LOG_ERROR,
555  "max b frames must be 0 or positive for mpegvideo based encoders\n");
556  return -1;
557  }
558 
559  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
560  s->codec_id == AV_CODEC_ID_H263 ||
561  s->codec_id == AV_CODEC_ID_H263P) &&
562  (avctx->sample_aspect_ratio.num > 255 ||
563  avctx->sample_aspect_ratio.den > 255)) {
564  av_log(avctx, AV_LOG_WARNING,
565  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
568  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
569  }
570 
571  if ((s->codec_id == AV_CODEC_ID_H263 ||
572  s->codec_id == AV_CODEC_ID_H263P) &&
573  (avctx->width > 2048 ||
574  avctx->height > 1152 )) {
575  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
576  return -1;
577  }
578  if ((s->codec_id == AV_CODEC_ID_H263 ||
579  s->codec_id == AV_CODEC_ID_H263P) &&
580  ((avctx->width &3) ||
581  (avctx->height&3) )) {
582  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
583  return -1;
584  }
585 
586  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
587  (avctx->width > 4095 ||
588  avctx->height > 4095 )) {
589  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
590  return -1;
591  }
592 
593  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
594  (avctx->width > 16383 ||
595  avctx->height > 16383 )) {
596  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
597  return -1;
598  }
599 
600  if (s->codec_id == AV_CODEC_ID_RV10 &&
601  (avctx->width &15 ||
602  avctx->height&15 )) {
603  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
604  return AVERROR(EINVAL);
605  }
606 
607  if (s->codec_id == AV_CODEC_ID_RV20 &&
608  (avctx->width &3 ||
609  avctx->height&3 )) {
610  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
611  return AVERROR(EINVAL);
612  }
613 
614  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
615  s->codec_id == AV_CODEC_ID_WMV2) &&
616  avctx->width & 1) {
617  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
618  return -1;
619  }
620 
623  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
624  return -1;
625  }
626 
627 #if FF_API_PRIVATE_OPT
629  if (avctx->mpeg_quant)
630  s->mpeg_quant = avctx->mpeg_quant;
632 #endif
633 
634  // FIXME mpeg2 uses that too
635  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
636  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
637  av_log(avctx, AV_LOG_ERROR,
638  "mpeg2 style quantization not supported by codec\n");
639  return -1;
640  }
641 
642  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
643  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
644  return -1;
645  }
646 
647  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
649  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
650  return -1;
651  }
652 
653  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
654  (s->codec_id == AV_CODEC_ID_AMV ||
655  s->codec_id == AV_CODEC_ID_MJPEG)) {
656  // Used to produce garbage with MJPEG.
657  av_log(avctx, AV_LOG_ERROR,
658  "QP RD is no longer compatible with MJPEG or AMV\n");
659  return -1;
660  }
661 
662 #if FF_API_PRIVATE_OPT
664  if (avctx->scenechange_threshold)
667 #endif
668 
669  if (s->scenechange_threshold < 1000000000 &&
671  av_log(avctx, AV_LOG_ERROR,
672  "closed gop with scene change detection are not supported yet, "
673  "set threshold to 1000000000\n");
674  return -1;
675  }
676 
677  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
678  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
680  av_log(avctx, AV_LOG_ERROR,
681  "low delay forcing is only available for mpeg2, "
682  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
683  return -1;
684  }
685  if (s->max_b_frames != 0) {
686  av_log(avctx, AV_LOG_ERROR,
687  "B-frames cannot be used with low delay\n");
688  return -1;
689  }
690  }
691 
692  if (s->q_scale_type == 1) {
693  if (avctx->qmax > 28) {
694  av_log(avctx, AV_LOG_ERROR,
695  "non linear quant only supports qmax <= 28 currently\n");
696  return -1;
697  }
698  }
699 
700  if (avctx->slices > 1 &&
701  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
702  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
703  return AVERROR(EINVAL);
704  }
705 
706  if (s->avctx->thread_count > 1 &&
707  s->codec_id != AV_CODEC_ID_MPEG4 &&
710  s->codec_id != AV_CODEC_ID_MJPEG &&
711  (s->codec_id != AV_CODEC_ID_H263P)) {
712  av_log(avctx, AV_LOG_ERROR,
713  "multi threaded encoding not supported by codec\n");
714  return -1;
715  }
716 
717  if (s->avctx->thread_count < 1) {
718  av_log(avctx, AV_LOG_ERROR,
719  "automatic thread number detection not supported by codec, "
720  "patch welcome\n");
721  return -1;
722  }
723 
724  if (!avctx->time_base.den || !avctx->time_base.num) {
725  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
726  return -1;
727  }
728 
729 #if FF_API_PRIVATE_OPT
731  if (avctx->b_frame_strategy)
733  if (avctx->b_sensitivity != 40)
734  s->b_sensitivity = avctx->b_sensitivity;
736 #endif
737 
738  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
739  av_log(avctx, AV_LOG_INFO,
740  "notice: b_frame_strategy only affects the first pass\n");
741  s->b_frame_strategy = 0;
742  }
743 
744  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
745  if (i > 1) {
746  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
747  avctx->time_base.den /= i;
748  avctx->time_base.num /= i;
749  //return -1;
750  }
751 
753  // (a + x * 3 / 8) / x
754  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
755  s->inter_quant_bias = 0;
756  } else {
757  s->intra_quant_bias = 0;
758  // (a - x / 4) / x
759  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
760  }
761 
762  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
763  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
764  return AVERROR(EINVAL);
765  }
766 
767 #if FF_API_QUANT_BIAS
774 #endif
775 
776  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
777 
778  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
779  s->avctx->time_base.den > (1 << 16) - 1) {
780  av_log(avctx, AV_LOG_ERROR,
781  "timebase %d/%d not supported by MPEG 4 standard, "
782  "the maximum admitted value for the timebase denominator "
783  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
784  (1 << 16) - 1);
785  return -1;
786  }
787  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
788 
789  switch (avctx->codec->id) {
791  s->out_format = FMT_MPEG1;
793  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
794  break;
796  s->out_format = FMT_MPEG1;
798  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
799  s->rtp_mode = 1;
800  break;
801  case AV_CODEC_ID_MJPEG:
802  case AV_CODEC_ID_AMV:
803  s->out_format = FMT_MJPEG;
804  s->intra_only = 1; /* force intra only for jpeg */
805  if (!CONFIG_MJPEG_ENCODER ||
806  ff_mjpeg_encode_init(s) < 0)
807  return -1;
808  avctx->delay = 0;
809  s->low_delay = 1;
810  break;
811  case AV_CODEC_ID_H261:
812  if (!CONFIG_H261_ENCODER)
813  return -1;
814  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
815  av_log(avctx, AV_LOG_ERROR,
816  "The specified picture size of %dx%d is not valid for the "
817  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
818  s->width, s->height);
819  return -1;
820  }
821  s->out_format = FMT_H261;
822  avctx->delay = 0;
823  s->low_delay = 1;
824  s->rtp_mode = 0; /* Sliced encoding not supported */
825  break;
826  case AV_CODEC_ID_H263:
827  if (!CONFIG_H263_ENCODER)
828  return -1;
830  s->width, s->height) == 8) {
831  av_log(avctx, AV_LOG_ERROR,
832  "The specified picture size of %dx%d is not valid for "
833  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
834  "352x288, 704x576, and 1408x1152. "
835  "Try H.263+.\n", s->width, s->height);
836  return -1;
837  }
838  s->out_format = FMT_H263;
839  avctx->delay = 0;
840  s->low_delay = 1;
841  break;
842  case AV_CODEC_ID_H263P:
843  s->out_format = FMT_H263;
844  s->h263_plus = 1;
845  /* Fx */
846  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
847  s->modified_quant = s->h263_aic;
848  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
849  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
850 
851  /* /Fx */
852  /* These are just to be sure */
853  avctx->delay = 0;
854  s->low_delay = 1;
855  break;
856  case AV_CODEC_ID_FLV1:
857  s->out_format = FMT_H263;
858  s->h263_flv = 2; /* format = 1; 11-bit codes */
859  s->unrestricted_mv = 1;
860  s->rtp_mode = 0; /* don't allow GOB */
861  avctx->delay = 0;
862  s->low_delay = 1;
863  break;
864  case AV_CODEC_ID_RV10:
865  s->out_format = FMT_H263;
866  avctx->delay = 0;
867  s->low_delay = 1;
868  break;
869  case AV_CODEC_ID_RV20:
870  s->out_format = FMT_H263;
871  avctx->delay = 0;
872  s->low_delay = 1;
873  s->modified_quant = 1;
874  s->h263_aic = 1;
875  s->h263_plus = 1;
876  s->loop_filter = 1;
877  s->unrestricted_mv = 0;
878  break;
879  case AV_CODEC_ID_MPEG4:
880  s->out_format = FMT_H263;
881  s->h263_pred = 1;
882  s->unrestricted_mv = 1;
883  s->low_delay = s->max_b_frames ? 0 : 1;
884  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
885  break;
887  s->out_format = FMT_H263;
888  s->h263_pred = 1;
889  s->unrestricted_mv = 1;
890  s->msmpeg4_version = 2;
891  avctx->delay = 0;
892  s->low_delay = 1;
893  break;
895  s->out_format = FMT_H263;
896  s->h263_pred = 1;
897  s->unrestricted_mv = 1;
898  s->msmpeg4_version = 3;
899  s->flipflop_rounding = 1;
900  avctx->delay = 0;
901  s->low_delay = 1;
902  break;
903  case AV_CODEC_ID_WMV1:
904  s->out_format = FMT_H263;
905  s->h263_pred = 1;
906  s->unrestricted_mv = 1;
907  s->msmpeg4_version = 4;
908  s->flipflop_rounding = 1;
909  avctx->delay = 0;
910  s->low_delay = 1;
911  break;
912  case AV_CODEC_ID_WMV2:
913  s->out_format = FMT_H263;
914  s->h263_pred = 1;
915  s->unrestricted_mv = 1;
916  s->msmpeg4_version = 5;
917  s->flipflop_rounding = 1;
918  avctx->delay = 0;
919  s->low_delay = 1;
920  break;
921  default:
922  return -1;
923  }
924 
925 #if FF_API_PRIVATE_OPT
927  if (avctx->noise_reduction)
928  s->noise_reduction = avctx->noise_reduction;
930 #endif
931 
932  avctx->has_b_frames = !s->low_delay;
933 
934  s->encoding = 1;
935 
936  s->progressive_frame =
939  s->alternate_scan);
940 
941  /* init */
942  ff_mpv_idct_init(s);
943  if (ff_mpv_common_init(s) < 0)
944  return -1;
945 
946  ff_fdctdsp_init(&s->fdsp, avctx);
947  ff_me_cmp_init(&s->mecc, avctx);
949  ff_pixblockdsp_init(&s->pdsp, avctx);
950  ff_qpeldsp_init(&s->qdsp);
951 
952  if (s->msmpeg4_version) {
954  2 * 2 * (MAX_LEVEL + 1) *
955  (MAX_RUN + 1) * 2 * sizeof(int), fail);
956  }
957  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
958 
959  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
960  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
961  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
962  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
963  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
964  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
966  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
968  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
969 
970 
971  if (s->noise_reduction) {
973  2 * 64 * sizeof(uint16_t), fail);
974  }
975 
977 
980 
981  if (s->slice_context_count > 1) {
982  s->rtp_mode = 1;
983 
984  if (avctx->codec_id == AV_CODEC_ID_H263P)
985  s->h263_slice_structured = 1;
986  }
987 
988  s->quant_precision = 5;
989 
990 #if FF_API_PRIVATE_OPT
992  if (avctx->frame_skip_threshold)
994  if (avctx->frame_skip_factor)
996  if (avctx->frame_skip_exp)
997  s->frame_skip_exp = avctx->frame_skip_exp;
998  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
999  s->frame_skip_cmp = avctx->frame_skip_cmp;
1001 #endif
1002 
1003  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
1005 
1011  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1012  return ret;
1014  && s->out_format == FMT_MPEG1)
1016 
1017  /* init q matrix */
1018  for (i = 0; i < 64; i++) {
1019  int j = s->idsp.idct_permutation[i];
1021  s->mpeg_quant) {
1024  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1025  s->intra_matrix[j] =
1027  } else {
1028  /* MPEG-1/2 */
1029  s->chroma_intra_matrix[j] =
1032  }
1033  if (s->avctx->intra_matrix)
1034  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1035  if (s->avctx->inter_matrix)
1036  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1037  }
1038 
1039  /* precompute matrix */
1040  /* for mjpeg, we do include qscale in the matrix */
1041  if (s->out_format != FMT_MJPEG) {
1043  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1044  31, 1);
1046  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1047  31, 0);
1048  }
1049 
1050 #if FF_API_RC_STRATEGY
1052  if (!s->rc_strategy)
1053  s->rc_strategy = s->avctx->rc_strategy;
1055 #endif
1056 
1057  if (ff_rate_control_init(s) < 0)
1058  return -1;
1059 
1060 #if FF_API_RC_STRATEGY
1062 #endif
1063 
1065 #if CONFIG_LIBXVID
1066  ret = ff_xvid_rate_control_init(s);
1067 #else
1068  ret = AVERROR(ENOSYS);
1070  "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
1071 #endif
1072  if (ret < 0)
1073  return ret;
1074  }
1075 
1076 #if FF_API_ERROR_RATE
1078  if (avctx->error_rate)
1079  s->error_rate = avctx->error_rate;
1081 #endif
1082 
1083 #if FF_API_NORMALIZE_AQP
1085  if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1086  s->mpv_flags |= FF_MPV_FLAG_NAQ;
1088 #endif
1089 
1090 #if FF_API_MV0
1092  if (avctx->flags & CODEC_FLAG_MV0)
1093  s->mpv_flags |= FF_MPV_FLAG_MV0;
1095 #endif
1096 
1097 #if FF_API_MPV_OPT
1099  if (avctx->rc_qsquish != 0.0)
1100  s->rc_qsquish = avctx->rc_qsquish;
1101  if (avctx->rc_qmod_amp != 0.0)
1102  s->rc_qmod_amp = avctx->rc_qmod_amp;
1103  if (avctx->rc_qmod_freq)
1104  s->rc_qmod_freq = avctx->rc_qmod_freq;
1105  if (avctx->rc_buffer_aggressivity != 1.0)
1107  if (avctx->rc_initial_cplx != 0.0)
1108  s->rc_initial_cplx = avctx->rc_initial_cplx;
1109  if (avctx->lmin)
1110  s->lmin = avctx->lmin;
1111  if (avctx->lmax)
1112  s->lmax = avctx->lmax;
1113 
1114  if (avctx->rc_eq) {
1115  av_freep(&s->rc_eq);
1116  s->rc_eq = av_strdup(avctx->rc_eq);
1117  if (!s->rc_eq)
1118  return AVERROR(ENOMEM);
1119  }
1121 #endif
1122 
1123 #if FF_API_PRIVATE_OPT
1125  if (avctx->brd_scale)
1126  s->brd_scale = avctx->brd_scale;
1127 
1128  if (avctx->prediction_method)
1129  s->pred = avctx->prediction_method + 1;
1131 #endif
1132 
1133  if (s->b_frame_strategy == 2) {
1134  for (i = 0; i < s->max_b_frames + 2; i++) {
1135  s->tmp_frames[i] = av_frame_alloc();
1136  if (!s->tmp_frames[i])
1137  return AVERROR(ENOMEM);
1138 
1140  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1141  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1142 
1143  ret = av_frame_get_buffer(s->tmp_frames[i], 32);
1144  if (ret < 0)
1145  return ret;
1146  }
1147  }
1148 
1149  cpb_props = ff_add_cpb_side_data(avctx);
1150  if (!cpb_props)
1151  return AVERROR(ENOMEM);
1152  cpb_props->max_bitrate = avctx->rc_max_rate;
1153  cpb_props->min_bitrate = avctx->rc_min_rate;
1154  cpb_props->avg_bitrate = avctx->bit_rate;
1155  cpb_props->buffer_size = avctx->rc_buffer_size;
1156 
1157  return 0;
1158 fail:
1159  ff_mpv_encode_end(avctx);
1160  return AVERROR_UNKNOWN;
1161 }
1162 
/* Free everything allocated during encoder init and shut the encoder down.
 * NOTE(review): the signature line (original 1163) and several cleanup
 * lines (e.g. 1168, 1170-1171, 1177, 1184-1185, 1190-1193, 1196-1199) are
 * elided in this extraction; this is ff_mpv_encode_end(AVCodecContext
 * *avctx) — it is called on the init failure path above. */
1164 {
1165  MpegEncContext *s = avctx->priv_data;
1166  int i;
1167 
1169 #if CONFIG_LIBXVID
1172 #endif
1173 
1174  ff_mpv_common_end(s);
 /* MJPEG keeps extra Huffman/encoder state needing its own teardown
  * (the call itself is on an elided line). */
1175  if (CONFIG_MJPEG_ENCODER &&
1176  s->out_format == FMT_MJPEG)
1178 
1179  av_freep(&avctx->extradata);
1180 
1181  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1182  av_frame_free(&s->tmp_frames[i]);
1183 
1186 
1187  av_freep(&s->avctx->stats_out);
1188  av_freep(&s->ac_stats);
1189 
 /* Quantization tables and picture lists allocated in init. */
1194  av_freep(&s->q_intra_matrix);
1195  av_freep(&s->q_inter_matrix);
1198  av_freep(&s->input_picture);
1200  av_freep(&s->dct_offset);
1201 
1202  return 0;
1203 }
1204 
/**
 * Sum of absolute errors between a 16x16 pixel block and a constant
 * reference value.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    constant value each pixel is compared against
 * @param stride distance in bytes between vertically adjacent pixels
 * @return the accumulated absolute difference over all 256 pixels
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int sum = 0;

    for (row = 0; row < 16; row++) {
        for (col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1218 
1220  uint8_t *ref, int stride)
1221 {
1222  int x, y, w, h;
1223  int acc = 0;
1224 
1225  w = s->width & ~15;
1226  h = s->height & ~15;
1227 
1228  for (y = 0; y < h; y += 16) {
1229  for (x = 0; x < w; x += 16) {
1230  int offset = x + y * stride;
1231  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1232  stride, 16);
1233  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1234  int sae = get_sae(src + offset, mean, stride);
1235 
1236  acc += sae + 500 < sad;
1237  }
1238  }
1239  return acc;
1240 }
1241 
/**
 * Allocate (or, when shared, attach) the buffers of an encoder-side Picture
 * via ff_alloc_picture(), passing the context's macroblock geometry
 * (mb_stride/mb_width/mb_height/b8_stride) and receiving the luma and
 * chroma linesizes back through s->linesize / s->uvlinesize.
 *
 * NOTE(review): one continuation line of the ff_alloc_picture() argument
 * list (original source line 1245) is missing from this extraction; the
 * call is left exactly as found.
 *
 * @param s      encoder context supplying geometry and ME/scratch contexts
 * @param pic    picture whose buffers are (re)allocated
 * @param shared nonzero if pic->f wraps caller-owned (shared) buffers
 * @return whatever ff_alloc_picture() returns — presumably 0 on success,
 *         a negative AVERROR on failure (confirm against its definition)
 */
1242 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1243 {
1244  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1246  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1247  &s->linesize, &s->uvlinesize);
1248 }
1249 
/**
 * Queue one user-supplied frame into the encoder's input FIFO
 * (s->input_picture[]), or handle a flush request when pic_arg is NULL.
 *
 * For a real frame this validates/derives its pts, decides whether the
 * caller's buffers can be referenced directly ("direct") or must be copied
 * into an internal picture, performs the copy (with bottom-edge padding)
 * when needed, and stores the picture at index `encoding_delay`.
 * On flush it compacts the FIFO so the oldest pending picture ends up at
 * s->input_picture[0].
 *
 * NOTE(review): several call-site first lines are missing from this
 * extraction (original lines 1269: start of an av_log() call, and 1328:
 * start of what is presumably an av_pix_fmt_get_chroma_sub_sample() call);
 * the code is left untouched.
 *
 * @param s       encoder context
 * @param pic_arg frame to encode, or NULL to signal end-of-stream flushing
 * @return 0 on success, a negative AVERROR code on failure
 */
1250 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1251 {
1252  Picture *pic = NULL;
1253  int64_t pts;
1254  int i, display_picture_number = 0, ret;
/* number of input frames the encoder buffers before producing output:
 * max_b_frames when B-frames are enabled, else 1 unless low_delay */
1255  int encoding_delay = s->max_b_frames ? s->max_b_frames
1256  : (s->low_delay ? 0 : 1);
1257  int flush_offset = 1;
1258  int direct = 1;
1259 
1260  if (pic_arg) {
1261  pts = pic_arg->pts;
1262  display_picture_number = s->input_picture_number++;
1263 
1264  if (pts != AV_NOPTS_VALUE) {
/* pts must be strictly increasing relative to the previous frame */
1265  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1266  int64_t last = s->user_specified_pts;
1267 
1268  if (pts <= last) {
1270  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1271  pts, last);
1272  return AVERROR(EINVAL);
1273  }
1274 
/* remember the dts offset needed for B-frame reordering */
1275  if (!s->low_delay && display_picture_number == 1)
1276  s->dts_delta = pts - last;
1277  }
1278  s->user_specified_pts = pts;
1279  } else {
/* no pts supplied: extrapolate from the previous one, or fall back
 * to the display picture number for the very first frame */
1280  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1281  s->user_specified_pts =
1282  pts = s->user_specified_pts + 1;
1283  av_log(s->avctx, AV_LOG_INFO,
1284  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1285  pts);
1286  } else {
1287  pts = display_picture_number;
1288  }
1289  }
1290 
/* direct mode: reference the caller's buffers instead of copying.
 * Requires matching linesizes, 16-aligned dimensions and
 * STRIDE_ALIGN-aligned data pointers/strides. */
1291  if (!pic_arg->buf[0] ||
1292  pic_arg->linesize[0] != s->linesize ||
1293  pic_arg->linesize[1] != s->uvlinesize ||
1294  pic_arg->linesize[2] != s->uvlinesize)
1295  direct = 0;
1296  if ((s->width & 15) || (s->height & 15))
1297  direct = 0;
1298  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1299  direct = 0;
1300  if (s->linesize & (STRIDE_ALIGN-1))
1301  direct = 0;
1302 
1303  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1304  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1305 
1306  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1307  if (i < 0)
1308  return i;
1309 
1310  pic = &s->picture[i];
1311  pic->reference = 3;
1312 
1313  if (direct) {
1314  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1315  return ret;
1316  }
1317  ret = alloc_picture(s, pic, direct);
1318  if (ret < 0)
1319  return ret;
1320 
1321  if (!direct) {
/* if the caller's planes already alias our buffers at INPLACE_OFFSET
 * the copy can be skipped entirely */
1322  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1323  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1324  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1325  // empty
1326  } else {
1327  int h_chroma_shift, v_chroma_shift;
1329  &h_chroma_shift,
1330  &v_chroma_shift);
1331 
/* copy all three planes into the internal picture */
1332  for (i = 0; i < 3; i++) {
1333  int src_stride = pic_arg->linesize[i];
1334  int dst_stride = i ? s->uvlinesize : s->linesize;
1335  int h_shift = i ? h_chroma_shift : 0;
1336  int v_shift = i ? v_chroma_shift : 0;
1337  int w = s->width >> h_shift;
1338  int h = s->height >> v_shift;
1339  uint8_t *src = pic_arg->data[i];
1340  uint8_t *dst = pic->f->data[i];
1341  int vpad = 16;
1342 
/* interlaced MPEG-2 with tall alignment slack needs 32 lines of
 * bottom padding instead of 16 */
1343  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1344  && !s->progressive_sequence
1345  && FFALIGN(s->height, 32) - s->height > 16)
1346  vpad = 32;
1347 
1348  if (!s->avctx->rc_buffer_size)
1349  dst += INPLACE_OFFSET;
1350 
/* fast path: identical strides allow one bulk memcpy */
1351  if (src_stride == dst_stride)
1352  memcpy(dst, src, src_stride * h);
1353  else {
1354  int h2 = h;
1355  uint8_t *dst2 = dst;
1356  while (h2--) {
1357  memcpy(dst2, src, w);
1358  dst2 += dst_stride;
1359  src += src_stride;
1360  }
1361  }
/* replicate the bottom/right edge into the padding area when the
 * dimensions are not already aligned */
1362  if ((s->width & 15) || (s->height & (vpad-1))) {
1363  s->mpvencdsp.draw_edges(dst, dst_stride,
1364  w, h,
1365  16 >> h_shift,
1366  vpad >> v_shift,
1367  EDGE_BOTTOM);
1368  }
1369  }
1370  emms_c();
1371  }
1372  }
1373  ret = av_frame_copy_props(pic->f, pic_arg);
1374  if (ret < 0)
1375  return ret;
1376 
1377  pic->f->display_picture_number = display_picture_number;
1378  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1379  } else {
1380  /* Flushing: When we have not received enough input frames,
1381  * ensure s->input_picture[0] contains the first picture */
1382  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1383  if (s->input_picture[flush_offset])
1384  break;
1385 
1386  if (flush_offset <= 1)
1387  flush_offset = 1;
1388  else
1389  encoding_delay = encoding_delay - flush_offset + 1;
1390  }
1391 
1392  /* shift buffer entries */
1393  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1394  s->input_picture[i - flush_offset] = s->input_picture[i];
1395 
/* pic is NULL on flush, which terminates the FIFO */
1396  s->input_picture[encoding_delay] = (Picture*) pic;
1397 
1398  return 0;
1399 }
1400 
1402 {
1403  int x, y, plane;
1404  int score = 0;
1405  int64_t score64 = 0;
1406 
1407  for (plane = 0; plane < 3; plane++) {
1408  const int stride = p->f->linesize[plane];
1409  const int bw = plane ? 1 : 2;
1410  for (y = 0; y < s->mb_height * bw; y++) {
1411  for (x = 0; x < s->mb_width * bw; x++) {
1412  int off = p->shared ? 0 : 16;
1413  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1414  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1415  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1416 
1417  switch (FFABS(s->frame_skip_exp)) {
1418  case 0: score = FFMAX(score, v); break;
1419  case 1: score += FFABS(v); break;
1420  case 2: score64 += v * (int64_t)v; break;
1421  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1422  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1423  }
1424  }
1425  }
1426  }
1427  emms_c();
1428 
1429  if (score)
1430  score64 = score;
1431  if (s->frame_skip_exp < 0)
1432  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1433  -1.0/s->frame_skip_exp);
1434 
1435  if (score64 < s->frame_skip_threshold)
1436  return 1;
1437  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1438  return 1;
1439  return 0;
1440 }
1441 
1443 {
1444  AVPacket pkt = { 0 };
1445  int ret;
1446  int size = 0;
1447 
1448  av_init_packet(&pkt);
1449 
1450  ret = avcodec_send_frame(c, frame);
1451  if (ret < 0)
1452  return ret;
1453 
1454  do {
1455  ret = avcodec_receive_packet(c, &pkt);
1456  if (ret >= 0) {
1457  size += pkt.size;
1458  av_packet_unref(&pkt);
1459  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1460  return ret;
1461  } while (ret >= 0);
1462 
1463  return size;
1464 }
1465 
1467 {
1468  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1469  const int scale = s->brd_scale;
1470  int width = s->width >> scale;
1471  int height = s->height >> scale;
1472  int i, j, out_size, p_lambda, b_lambda, lambda2;
1473  int64_t best_rd = INT64_MAX;
1474  int best_b_count = -1;
1475  int ret = 0;
1476 
1477  av_assert0(scale >= 0 && scale <= 3);
1478 
1479  //emms_c();
1480  //s->next_picture_ptr->quality;
1481  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1482  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1483  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1484  if (!b_lambda) // FIXME we should do this somewhere else
1485  b_lambda = p_lambda;
1486  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1488 
1489  for (i = 0; i < s->max_b_frames + 2; i++) {
1490  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1491  s->next_picture_ptr;
1492  uint8_t *data[4];
1493 
1494  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1495  pre_input = *pre_input_ptr;
1496  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1497 
1498  if (!pre_input.shared && i) {
1499  data[0] += INPLACE_OFFSET;
1500  data[1] += INPLACE_OFFSET;
1501  data[2] += INPLACE_OFFSET;
1502  }
1503 
1504  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1505  s->tmp_frames[i]->linesize[0],
1506  data[0],
1507  pre_input.f->linesize[0],
1508  width, height);
1509  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1510  s->tmp_frames[i]->linesize[1],
1511  data[1],
1512  pre_input.f->linesize[1],
1513  width >> 1, height >> 1);
1514  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1515  s->tmp_frames[i]->linesize[2],
1516  data[2],
1517  pre_input.f->linesize[2],
1518  width >> 1, height >> 1);
1519  }
1520  }
1521 
1522  for (j = 0; j < s->max_b_frames + 1; j++) {
1523  AVCodecContext *c;
1524  int64_t rd = 0;
1525 
1526  if (!s->input_picture[j])
1527  break;
1528 
1530  if (!c)
1531  return AVERROR(ENOMEM);
1532 
1533  c->width = width;
1534  c->height = height;
1536  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1537  c->mb_decision = s->avctx->mb_decision;
1538  c->me_cmp = s->avctx->me_cmp;
1539  c->mb_cmp = s->avctx->mb_cmp;
1540  c->me_sub_cmp = s->avctx->me_sub_cmp;
1542  c->time_base = s->avctx->time_base;
1543  c->max_b_frames = s->max_b_frames;
1544 
1545  ret = avcodec_open2(c, codec, NULL);
1546  if (ret < 0)
1547  goto fail;
1548 
1550  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1551 
1552  out_size = encode_frame(c, s->tmp_frames[0]);
1553  if (out_size < 0) {
1554  ret = out_size;
1555  goto fail;
1556  }
1557 
1558  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1559 
1560  for (i = 0; i < s->max_b_frames + 1; i++) {
1561  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1562 
1563  s->tmp_frames[i + 1]->pict_type = is_p ?
1565  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1566 
1567  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1568  if (out_size < 0) {
1569  ret = out_size;
1570  goto fail;
1571  }
1572 
1573  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1574  }
1575 
1576  /* get the delayed frames */
1577  out_size = encode_frame(c, NULL);
1578  if (out_size < 0) {
1579  ret = out_size;
1580  goto fail;
1581  }
1582  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1583 
1584  rd += c->error[0] + c->error[1] + c->error[2];
1585 
1586  if (rd < best_rd) {
1587  best_rd = rd;
1588  best_b_count = j;
1589  }
1590 
1591 fail:
1593  if (ret < 0)
1594  return ret;
1595  }
1596 
1597  return best_b_count;
1598 }
1599 
1601 {
1602  int i, ret;
1603 
1604  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1606  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1607 
1608  /* set next picture type & ordering */
1609  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1610  if (s->frame_skip_threshold || s->frame_skip_factor) {
1611  if (s->picture_in_gop_number < s->gop_size &&
1612  s->next_picture_ptr &&
1613  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1614  // FIXME check that the gop check above is +-1 correct
1615  av_frame_unref(s->input_picture[0]->f);
1616 
1617  ff_vbv_update(s, 0);
1618 
1619  goto no_output_pic;
1620  }
1621  }
1622 
1623  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1624  !s->next_picture_ptr || s->intra_only) {
1625  s->reordered_input_picture[0] = s->input_picture[0];
1628  s->coded_picture_number++;
1629  } else {
1630  int b_frames = 0;
1631 
1632  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1633  for (i = 0; i < s->max_b_frames + 1; i++) {
1634  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1635 
1636  if (pict_num >= s->rc_context.num_entries)
1637  break;
1638  if (!s->input_picture[i]) {
1639  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1640  break;
1641  }
1642 
1643  s->input_picture[i]->f->pict_type =
1644  s->rc_context.entry[pict_num].new_pict_type;
1645  }
1646  }
1647 
1648  if (s->b_frame_strategy == 0) {
1649  b_frames = s->max_b_frames;
1650  while (b_frames && !s->input_picture[b_frames])
1651  b_frames--;
1652  } else if (s->b_frame_strategy == 1) {
1653  for (i = 1; i < s->max_b_frames + 1; i++) {
1654  if (s->input_picture[i] &&
1655  s->input_picture[i]->b_frame_score == 0) {
1656  s->input_picture[i]->b_frame_score =
1657  get_intra_count(s,
1658  s->input_picture[i ]->f->data[0],
1659  s->input_picture[i - 1]->f->data[0],
1660  s->linesize) + 1;
1661  }
1662  }
1663  for (i = 0; i < s->max_b_frames + 1; i++) {
1664  if (!s->input_picture[i] ||
1665  s->input_picture[i]->b_frame_score - 1 >
1666  s->mb_num / s->b_sensitivity)
1667  break;
1668  }
1669 
1670  b_frames = FFMAX(0, i - 1);
1671 
1672  /* reset scores */
1673  for (i = 0; i < b_frames + 1; i++) {
1674  s->input_picture[i]->b_frame_score = 0;
1675  }
1676  } else if (s->b_frame_strategy == 2) {
1677  b_frames = estimate_best_b_count(s);
1678  if (b_frames < 0)
1679  return b_frames;
1680  }
1681 
1682  emms_c();
1683 
1684  for (i = b_frames - 1; i >= 0; i--) {
1685  int type = s->input_picture[i]->f->pict_type;
1686  if (type && type != AV_PICTURE_TYPE_B)
1687  b_frames = i;
1688  }
1689  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1690  b_frames == s->max_b_frames) {
1692  "warning, too many B-frames in a row\n");
1693  }
1694 
1695  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1696  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1697  s->gop_size > s->picture_in_gop_number) {
1698  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1699  } else {
1701  b_frames = 0;
1702  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1703  }
1704  }
1705 
1706  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1707  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1708  b_frames--;
1709 
1710  s->reordered_input_picture[0] = s->input_picture[b_frames];
1714  s->coded_picture_number++;
1715  for (i = 0; i < b_frames; i++) {
1716  s->reordered_input_picture[i + 1] = s->input_picture[i];
1717  s->reordered_input_picture[i + 1]->f->pict_type =
1720  s->coded_picture_number++;
1721  }
1722  }
1723  }
1724 no_output_pic:
1726 
1727  if (s->reordered_input_picture[0]) {
1730  AV_PICTURE_TYPE_B ? 3 : 0;
1731 
1732  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1733  return ret;
1734 
1735  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1736  // input is a shared pix, so we can't modify it -> allocate a new
1737  // one & ensure that the shared one is reuseable
1738 
1739  Picture *pic;
1740  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1741  if (i < 0)
1742  return i;
1743  pic = &s->picture[i];
1744 
1746  if (alloc_picture(s, pic, 0) < 0) {
1747  return -1;
1748  }
1749 
1750  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1751  if (ret < 0)
1752  return ret;
1753 
1754  /* mark us unused / free shared pic */
1756  s->reordered_input_picture[0]->shared = 0;
1757 
1758  s->current_picture_ptr = pic;
1759  } else {
1760  // input is not a shared pix -> reuse buffer for current_pix
1762  for (i = 0; i < 4; i++) {
1763  s->new_picture.f->data[i] += INPLACE_OFFSET;
1764  }
1765  }
1767  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1768  s->current_picture_ptr)) < 0)
1769  return ret;
1770 
1772  }
1773  return 0;
1774 }
1775 
/**
 * Per-frame post-encoding bookkeeping: pad the reconstructed picture's
 * borders for unrestricted-MV codecs, then record the picture type for the
 * next frame's decisions.
 *
 * NOTE(review): this extraction is missing several lines — among them the
 * first lines of three draw_edges-style calls (original lines 1784, 1789,
 * 1796; only their argument continuations for the three planes are
 * visible), part of the `if` condition (1779/1781), the statement guarded
 * at 1810, and the bodies of the FF_API_CODED_FRAME / FF_API_ERROR_FRAME
 * compatibility sections. The code is left untouched.
 */
1776 static void frame_end(MpegEncContext *s)
1777 {
/* only pad when motion vectors may point outside the picture */
1778  if (s->unrestricted_mv &&
1780  !s->intra_only) {
1782  int hshift = desc->log2_chroma_w;
1783  int vshift = desc->log2_chroma_h;
/* luma plane: full edge positions */
1785  s->current_picture.f->linesize[0],
1786  s->h_edge_pos, s->v_edge_pos,
1788  EDGE_TOP | EDGE_BOTTOM);
/* chroma planes: positions and edge width scaled by the subsampling shifts */
1790  s->current_picture.f->linesize[1],
1791  s->h_edge_pos >> hshift,
1792  s->v_edge_pos >> vshift,
1793  EDGE_WIDTH >> hshift,
1794  EDGE_WIDTH >> vshift,
1795  EDGE_TOP | EDGE_BOTTOM);
1797  s->current_picture.f->linesize[2],
1798  s->h_edge_pos >> hshift,
1799  s->v_edge_pos >> vshift,
1800  EDGE_WIDTH >> hshift,
1801  EDGE_WIDTH >> vshift,
1802  EDGE_TOP | EDGE_BOTTOM);
1803  }
1804 
/* clear MMX state after the dsp calls above */
1805  emms_c();
1806 
1807  s->last_pict_type = s->pict_type;
1809  if (s->pict_type!= AV_PICTURE_TYPE_B)
1811 
1812 #if FF_API_CODED_FRAME
1817 #endif
1818 #if FF_API_ERROR_FRAME
1821  sizeof(s->current_picture.encoding_error));
1823 #endif
1824 }
1825 
1827 {
1828  int intra, i;
1829 
1830  for (intra = 0; intra < 2; intra++) {
1831  if (s->dct_count[intra] > (1 << 16)) {
1832  for (i = 0; i < 64; i++) {
1833  s->dct_error_sum[intra][i] >>= 1;
1834  }
1835  s->dct_count[intra] >>= 1;
1836  }
1837 
1838  for (i = 0; i < 64; i++) {
1839  s->dct_offset[intra][i] = (s->noise_reduction *
1840  s->dct_count[intra] +
1841  s->dct_error_sum[intra][i] / 2) /
1842  (s->dct_error_sum[intra][i] + 1);
1843  }
1844  }
1845 }
1846 
1848 {
1849  int ret;
1850 
1851  /* mark & release old frames */
1852  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1854  s->last_picture_ptr->f->buf[0]) {
1856  }
1857 
1860 
1862  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1863  s->current_picture_ptr)) < 0)
1864  return ret;
1865 
1866  if (s->pict_type != AV_PICTURE_TYPE_B) {
1868  if (!s->droppable)
1870  }
1871 
1872  if (s->last_picture_ptr) {
1874  if (s->last_picture_ptr->f->buf[0] &&
1875  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1876  s->last_picture_ptr)) < 0)
1877  return ret;
1878  }
1879  if (s->next_picture_ptr) {
1881  if (s->next_picture_ptr->f->buf[0] &&
1882  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1883  s->next_picture_ptr)) < 0)
1884  return ret;
1885  }
1886 
1887  if (s->picture_structure!= PICT_FRAME) {
1888  int i;
1889  for (i = 0; i < 4; i++) {
1891  s->current_picture.f->data[i] +=
1892  s->current_picture.f->linesize[i];
1893  }
1894  s->current_picture.f->linesize[i] *= 2;
1895  s->last_picture.f->linesize[i] *= 2;
1896  s->next_picture.f->linesize[i] *= 2;
1897  }
1898  }
1899 
1900  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1903  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1906  } else {
1909  }
1910 
1911  if (s->dct_error_sum) {
1914  }
1915 
1916  return 0;
1917 }
1918 
1920  const AVFrame *pic_arg, int *got_packet)
1921 {
1922  MpegEncContext *s = avctx->priv_data;
1923  int i, stuffing_count, ret;
1924  int context_count = s->slice_context_count;
1925 
1926  s->vbv_ignore_qmax = 0;
1927 
1928  s->picture_in_gop_number++;
1929 
1930  if (load_input_picture(s, pic_arg) < 0)
1931  return -1;
1932 
1933  if (select_input_picture(s) < 0) {
1934  return -1;
1935  }
1936 
1937  /* output? */
1938  if (s->new_picture.f->data[0]) {
1939  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1940  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1941  :
1942  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1943  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1944  return ret;
1945  if (s->mb_info) {
1948  s->mb_width*s->mb_height*12);
1949  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1950  }
1951 
1952  for (i = 0; i < context_count; i++) {
1953  int start_y = s->thread_context[i]->start_mb_y;
1954  int end_y = s->thread_context[i]-> end_mb_y;
1955  int h = s->mb_height;
1956  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1957  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1958 
1959  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1960  }
1961 
1962  s->pict_type = s->new_picture.f->pict_type;
1963  //emms_c();
1964  ret = frame_start(s);
1965  if (ret < 0)
1966  return ret;
1967 vbv_retry:
1968  ret = encode_picture(s, s->picture_number);
1969  if (growing_buffer) {
1970  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1971  pkt->data = s->pb.buf;
1972  pkt->size = avctx->internal->byte_buffer_size;
1973  }
1974  if (ret < 0)
1975  return -1;
1976 
1977 #if FF_API_STAT_BITS
1979  avctx->header_bits = s->header_bits;
1980  avctx->mv_bits = s->mv_bits;
1981  avctx->misc_bits = s->misc_bits;
1982  avctx->i_tex_bits = s->i_tex_bits;
1983  avctx->p_tex_bits = s->p_tex_bits;
1984  avctx->i_count = s->i_count;
1985  // FIXME f/b_count in avctx
1986  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1987  avctx->skip_count = s->skip_count;
1989 #endif
1990 
1991  frame_end(s);
1992 
1995 
1996  if (avctx->rc_buffer_size) {
1997  RateControlContext *rcc = &s->rc_context;
1998  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1999  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
2000  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
2001 
2002  if (put_bits_count(&s->pb) > max_size &&
2003  s->lambda < s->lmax) {
2004  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2005  (s->qscale + 1) / s->qscale);
2006  if (s->adaptive_quant) {
2007  int i;
2008  for (i = 0; i < s->mb_height * s->mb_stride; i++)
2009  s->lambda_table[i] =
2010  FFMAX(s->lambda_table[i] + min_step,
2011  s->lambda_table[i] * (s->qscale + 1) /
2012  s->qscale);
2013  }
2014  s->mb_skipped = 0; // done in frame_start()
2015  // done in encode_picture() so we must undo it
2016  if (s->pict_type == AV_PICTURE_TYPE_P) {
2017  if (s->flipflop_rounding ||
2018  s->codec_id == AV_CODEC_ID_H263P ||
2020  s->no_rounding ^= 1;
2021  }
2022  if (s->pict_type != AV_PICTURE_TYPE_B) {
2023  s->time_base = s->last_time_base;
2024  s->last_non_b_time = s->time - s->pp_time;
2025  }
2026  for (i = 0; i < context_count; i++) {
2027  PutBitContext *pb = &s->thread_context[i]->pb;
2028  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
2029  }
2030  s->vbv_ignore_qmax = 1;
2031  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2032  goto vbv_retry;
2033  }
2034 
2036  }
2037 
2038  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2040 
2041  for (i = 0; i < 4; i++) {
2043  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
2044  }
2047  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
2048  s->pict_type);
2049 
2050  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2051  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2052  s->misc_bits + s->i_tex_bits +
2053  s->p_tex_bits);
2054  flush_put_bits(&s->pb);
2055  s->frame_bits = put_bits_count(&s->pb);
2056 
2057  stuffing_count = ff_vbv_update(s, s->frame_bits);
2058  s->stuffing_bits = 8*stuffing_count;
2059  if (stuffing_count) {
2060  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2061  stuffing_count + 50) {
2062  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
2063  return -1;
2064  }
2065 
2066  switch (s->codec_id) {
2069  while (stuffing_count--) {
2070  put_bits(&s->pb, 8, 0);
2071  }
2072  break;
2073  case AV_CODEC_ID_MPEG4:
2074  put_bits(&s->pb, 16, 0);
2075  put_bits(&s->pb, 16, 0x1C3);
2076  stuffing_count -= 4;
2077  while (stuffing_count--) {
2078  put_bits(&s->pb, 8, 0xFF);
2079  }
2080  break;
2081  default:
2082  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2083  }
2084  flush_put_bits(&s->pb);
2085  s->frame_bits = put_bits_count(&s->pb);
2086  }
2087 
2088  /* update MPEG-1/2 vbv_delay for CBR */
2089  if (s->avctx->rc_max_rate &&
2090  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2091  s->out_format == FMT_MPEG1 &&
2092  90000LL * (avctx->rc_buffer_size - 1) <=
2093  s->avctx->rc_max_rate * 0xFFFFLL) {
2094  AVCPBProperties *props;
2095  size_t props_size;
2096 
2097  int vbv_delay, min_delay;
2098  double inbits = s->avctx->rc_max_rate *
2099  av_q2d(s->avctx->time_base);
2100  int minbits = s->frame_bits - 8 *
2101  (s->vbv_delay_ptr - s->pb.buf - 1);
2102  double bits = s->rc_context.buffer_index + minbits - inbits;
2103 
2104  if (bits < 0)
2106  "Internal error, negative bits\n");
2107 
2108  assert(s->repeat_first_field == 0);
2109 
2110  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2111  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2112  s->avctx->rc_max_rate;
2113 
2114  vbv_delay = FFMAX(vbv_delay, min_delay);
2115 
2116  av_assert0(vbv_delay < 0xFFFF);
2117 
2118  s->vbv_delay_ptr[0] &= 0xF8;
2119  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2120  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2121  s->vbv_delay_ptr[2] &= 0x07;
2122  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2123 
2124  props = av_cpb_properties_alloc(&props_size);
2125  if (!props)
2126  return AVERROR(ENOMEM);
2127  props->vbv_delay = vbv_delay * 300;
2128 
2130  (uint8_t*)props, props_size);
2131  if (ret < 0) {
2132  av_freep(&props);
2133  return ret;
2134  }
2135 
2136 #if FF_API_VBV_DELAY
2138  avctx->vbv_delay = vbv_delay * 300;
2140 #endif
2141  }
2142  s->total_bits += s->frame_bits;
2143 #if FF_API_STAT_BITS
2145  avctx->frame_bits = s->frame_bits;
2147 #endif
2148 
2149 
2150  pkt->pts = s->current_picture.f->pts;
2151  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2153  pkt->dts = pkt->pts - s->dts_delta;
2154  else
2155  pkt->dts = s->reordered_pts;
2156  s->reordered_pts = pkt->pts;
2157  } else
2158  pkt->dts = pkt->pts;
2159  if (s->current_picture.f->key_frame)
2160  pkt->flags |= AV_PKT_FLAG_KEY;
2161  if (s->mb_info)
2163  } else {
2164  s->frame_bits = 0;
2165  }
2166 
2167  /* release non-reference frames */
2168  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2169  if (!s->picture[i].reference)
2170  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2171  }
2172 
2173  av_assert1((s->frame_bits & 7) == 0);
2174 
2175  pkt->size = s->frame_bits / 8;
2176  *got_packet = !!pkt->size;
2177  return 0;
2178 }
2179 
2181  int n, int threshold)
2182 {
2183  static const char tab[64] = {
2184  3, 2, 2, 1, 1, 1, 1, 1,
2185  1, 1, 1, 1, 1, 1, 1, 1,
2186  1, 1, 1, 1, 1, 1, 1, 1,
2187  0, 0, 0, 0, 0, 0, 0, 0,
2188  0, 0, 0, 0, 0, 0, 0, 0,
2189  0, 0, 0, 0, 0, 0, 0, 0,
2190  0, 0, 0, 0, 0, 0, 0, 0,
2191  0, 0, 0, 0, 0, 0, 0, 0
2192  };
2193  int score = 0;
2194  int run = 0;
2195  int i;
2196  int16_t *block = s->block[n];
2197  const int last_index = s->block_last_index[n];
2198  int skip_dc;
2199 
2200  if (threshold < 0) {
2201  skip_dc = 0;
2202  threshold = -threshold;
2203  } else
2204  skip_dc = 1;
2205 
2206  /* Are all we could set to zero already zero? */
2207  if (last_index <= skip_dc - 1)
2208  return;
2209 
2210  for (i = 0; i <= last_index; i++) {
2211  const int j = s->intra_scantable.permutated[i];
2212  const int level = FFABS(block[j]);
2213  if (level == 1) {
2214  if (skip_dc && i == 0)
2215  continue;
2216  score += tab[run];
2217  run = 0;
2218  } else if (level > 1) {
2219  return;
2220  } else {
2221  run++;
2222  }
2223  }
2224  if (score >= threshold)
2225  return;
2226  for (i = skip_dc; i <= last_index; i++) {
2227  const int j = s->intra_scantable.permutated[i];
2228  block[j] = 0;
2229  }
2230  if (block[0])
2231  s->block_last_index[n] = 0;
2232  else
2233  s->block_last_index[n] = -1;
2234 }
2235 
2236 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2237  int last_index)
2238 {
2239  int i;
2240  const int maxlevel = s->max_qcoeff;
2241  const int minlevel = s->min_qcoeff;
2242  int overflow = 0;
2243 
2244  if (s->mb_intra) {
2245  i = 1; // skip clipping of intra dc
2246  } else
2247  i = 0;
2248 
2249  for (; i <= last_index; i++) {
2250  const int j = s->intra_scantable.permutated[i];
2251  int level = block[j];
2252 
2253  if (level > maxlevel) {
2254  level = maxlevel;
2255  overflow++;
2256  } else if (level < minlevel) {
2257  level = minlevel;
2258  overflow++;
2259  }
2260 
2261  block[j] = level;
2262  }
2263 
2264  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2265  av_log(s->avctx, AV_LOG_INFO,
2266  "warning, clipping %d dct coefficients to %d..%d\n",
2267  overflow, minlevel, maxlevel);
2268 }
2269 
/**
 * Build an 8x8 table of per-pixel visual weights from local activity.
 *
 * For every pixel the 3x3 neighbourhood (clipped at the block borders) is
 * examined; the weight is proportional to the integer square root of
 * count*sum(v^2) - (sum(v))^2, i.e. a scaled local variance measure.
 *
 * @param weight 64-entry output table, laid out row-major as weight[x + 8*y]
 * @param ptr    top-left pixel of the source 8x8 block
 * @param stride distance in bytes between vertically adjacent source pixels
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int row, col;
    // FIXME optimize
    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            int nx, ny;
            int pix_sum = 0;
            int pix_sqr = 0;
            int n = 0;

            /* accumulate over the 3x3 neighbourhood, clipped to the block */
            for (ny = FFMAX(row - 1, 0); ny < FFMIN(8, row + 2); ny++) {
                for (nx = FFMAX(col - 1, 0); nx < FFMIN(8, col + 2); nx++) {
                    int v = ptr[nx + ny * stride];

                    pix_sum += v;
                    pix_sqr += v * v;
                    n++;
                }
            }
            weight[col + 8 * row] =
                (36 * ff_sqrt(n * pix_sqr - pix_sum * pix_sum)) / n;
        }
    }
}
2293 
2295  int motion_x, int motion_y,
2296  int mb_block_height,
2297  int mb_block_width,
2298  int mb_block_count)
2299 {
2300  int16_t weight[12][64];
2301  int16_t orig[12][64];
2302  const int mb_x = s->mb_x;
2303  const int mb_y = s->mb_y;
2304  int i;
2305  int skip_dct[12];
2306  int dct_offset = s->linesize * 8; // default for progressive frames
2307  int uv_dct_offset = s->uvlinesize * 8;
2308  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2309  ptrdiff_t wrap_y, wrap_c;
2310 
2311  for (i = 0; i < mb_block_count; i++)
2312  skip_dct[i] = s->skipdct;
2313 
2314  if (s->adaptive_quant) {
2315  const int last_qp = s->qscale;
2316  const int mb_xy = mb_x + mb_y * s->mb_stride;
2317 
2318  s->lambda = s->lambda_table[mb_xy];
2319  update_qscale(s);
2320 
2321  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2322  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2323  s->dquant = s->qscale - last_qp;
2324 
2325  if (s->out_format == FMT_H263) {
2326  s->dquant = av_clip(s->dquant, -2, 2);
2327 
2328  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2329  if (!s->mb_intra) {
2330  if (s->pict_type == AV_PICTURE_TYPE_B) {
2331  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2332  s->dquant = 0;
2333  }
2334  if (s->mv_type == MV_TYPE_8X8)
2335  s->dquant = 0;
2336  }
2337  }
2338  }
2339  }
2340  ff_set_qscale(s, last_qp + s->dquant);
2341  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2342  ff_set_qscale(s, s->qscale + s->dquant);
2343 
2344  wrap_y = s->linesize;
2345  wrap_c = s->uvlinesize;
2346  ptr_y = s->new_picture.f->data[0] +
2347  (mb_y * 16 * wrap_y) + mb_x * 16;
2348  ptr_cb = s->new_picture.f->data[1] +
2349  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2350  ptr_cr = s->new_picture.f->data[2] +
2351  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2352 
2353  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2354  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2355  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2356  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2357  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2358  wrap_y, wrap_y,
2359  16, 16, mb_x * 16, mb_y * 16,
2360  s->width, s->height);
2361  ptr_y = ebuf;
2362  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2363  wrap_c, wrap_c,
2364  mb_block_width, mb_block_height,
2365  mb_x * mb_block_width, mb_y * mb_block_height,
2366  cw, ch);
2367  ptr_cb = ebuf + 16 * wrap_y;
2368  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2369  wrap_c, wrap_c,
2370  mb_block_width, mb_block_height,
2371  mb_x * mb_block_width, mb_y * mb_block_height,
2372  cw, ch);
2373  ptr_cr = ebuf + 16 * wrap_y + 16;
2374  }
2375 
2376  if (s->mb_intra) {
2378  int progressive_score, interlaced_score;
2379 
2380  s->interlaced_dct = 0;
2381  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2382  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2383  NULL, wrap_y, 8) - 400;
2384 
2385  if (progressive_score > 0) {
2386  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2387  NULL, wrap_y * 2, 8) +
2388  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2389  NULL, wrap_y * 2, 8);
2390  if (progressive_score > interlaced_score) {
2391  s->interlaced_dct = 1;
2392 
2393  dct_offset = wrap_y;
2394  uv_dct_offset = wrap_c;
2395  wrap_y <<= 1;
2396  if (s->chroma_format == CHROMA_422 ||
2397  s->chroma_format == CHROMA_444)
2398  wrap_c <<= 1;
2399  }
2400  }
2401  }
2402 
2403  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2404  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2405  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2406  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2407 
2408  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2409  skip_dct[4] = 1;
2410  skip_dct[5] = 1;
2411  } else {
2412  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2413  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2414  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2415  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2416  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2417  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2418  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2419  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2420  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2421  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2422  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2423  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2424  }
2425  }
2426  } else {
2427  op_pixels_func (*op_pix)[4];
2428  qpel_mc_func (*op_qpix)[16];
2429  uint8_t *dest_y, *dest_cb, *dest_cr;
2430 
2431  dest_y = s->dest[0];
2432  dest_cb = s->dest[1];
2433  dest_cr = s->dest[2];
2434 
2435  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2436  op_pix = s->hdsp.put_pixels_tab;
2437  op_qpix = s->qdsp.put_qpel_pixels_tab;
2438  } else {
2439  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2440  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2441  }
2442 
2443  if (s->mv_dir & MV_DIR_FORWARD) {
2444  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2445  s->last_picture.f->data,
2446  op_pix, op_qpix);
2447  op_pix = s->hdsp.avg_pixels_tab;
2448  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2449  }
2450  if (s->mv_dir & MV_DIR_BACKWARD) {
2451  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2452  s->next_picture.f->data,
2453  op_pix, op_qpix);
2454  }
2455 
2457  int progressive_score, interlaced_score;
2458 
2459  s->interlaced_dct = 0;
2460  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2461  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2462  ptr_y + wrap_y * 8,
2463  wrap_y, 8) - 400;
2464 
2465  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2466  progressive_score -= 400;
2467 
2468  if (progressive_score > 0) {
2469  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2470  wrap_y * 2, 8) +
2471  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2472  ptr_y + wrap_y,
2473  wrap_y * 2, 8);
2474 
2475  if (progressive_score > interlaced_score) {
2476  s->interlaced_dct = 1;
2477 
2478  dct_offset = wrap_y;
2479  uv_dct_offset = wrap_c;
2480  wrap_y <<= 1;
2481  if (s->chroma_format == CHROMA_422)
2482  wrap_c <<= 1;
2483  }
2484  }
2485  }
2486 
2487  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2488  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2489  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2490  dest_y + dct_offset, wrap_y);
2491  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2492  dest_y + dct_offset + 8, wrap_y);
2493 
2494  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2495  skip_dct[4] = 1;
2496  skip_dct[5] = 1;
2497  } else {
2498  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2499  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2500  if (!s->chroma_y_shift) { /* 422 */
2501  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2502  dest_cb + uv_dct_offset, wrap_c);
2503  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2504  dest_cr + uv_dct_offset, wrap_c);
2505  }
2506  }
2507  /* pre quantization */
2508  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2509  2 * s->qscale * s->qscale) {
2510  // FIXME optimize
2511  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2512  skip_dct[0] = 1;
2513  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2514  skip_dct[1] = 1;
2515  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2516  wrap_y, 8) < 20 * s->qscale)
2517  skip_dct[2] = 1;
2518  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2519  wrap_y, 8) < 20 * s->qscale)
2520  skip_dct[3] = 1;
2521  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2522  skip_dct[4] = 1;
2523  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2524  skip_dct[5] = 1;
2525  if (!s->chroma_y_shift) { /* 422 */
2526  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2527  dest_cb + uv_dct_offset,
2528  wrap_c, 8) < 20 * s->qscale)
2529  skip_dct[6] = 1;
2530  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2531  dest_cr + uv_dct_offset,
2532  wrap_c, 8) < 20 * s->qscale)
2533  skip_dct[7] = 1;
2534  }
2535  }
2536  }
2537 
2538  if (s->quantizer_noise_shaping) {
2539  if (!skip_dct[0])
2540  get_visual_weight(weight[0], ptr_y , wrap_y);
2541  if (!skip_dct[1])
2542  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2543  if (!skip_dct[2])
2544  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2545  if (!skip_dct[3])
2546  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2547  if (!skip_dct[4])
2548  get_visual_weight(weight[4], ptr_cb , wrap_c);
2549  if (!skip_dct[5])
2550  get_visual_weight(weight[5], ptr_cr , wrap_c);
2551  if (!s->chroma_y_shift) { /* 422 */
2552  if (!skip_dct[6])
2553  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2554  wrap_c);
2555  if (!skip_dct[7])
2556  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2557  wrap_c);
2558  }
2559  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2560  }
2561 
2562  /* DCT & quantize */
2563  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2564  {
2565  for (i = 0; i < mb_block_count; i++) {
2566  if (!skip_dct[i]) {
2567  int overflow;
2568  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2569  // FIXME we could decide to change to quantizer instead of
2570  // clipping
2571  // JS: I don't think that would be a good idea it could lower
2572  // quality instead of improve it. Just INTRADC clipping
2573  // deserves changes in quantizer
2574  if (overflow)
2575  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2576  } else
2577  s->block_last_index[i] = -1;
2578  }
2579  if (s->quantizer_noise_shaping) {
2580  for (i = 0; i < mb_block_count; i++) {
2581  if (!skip_dct[i]) {
2582  s->block_last_index[i] =
2583  dct_quantize_refine(s, s->block[i], weight[i],
2584  orig[i], i, s->qscale);
2585  }
2586  }
2587  }
2588 
2589  if (s->luma_elim_threshold && !s->mb_intra)
2590  for (i = 0; i < 4; i++)
2592  if (s->chroma_elim_threshold && !s->mb_intra)
2593  for (i = 4; i < mb_block_count; i++)
2595 
2596  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2597  for (i = 0; i < mb_block_count; i++) {
2598  if (s->block_last_index[i] == -1)
2599  s->coded_score[i] = INT_MAX / 256;
2600  }
2601  }
2602  }
2603 
2604  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2605  s->block_last_index[4] =
2606  s->block_last_index[5] = 0;
2607  s->block[4][0] =
2608  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2609  if (!s->chroma_y_shift) { /* 422 / 444 */
2610  for (i=6; i<12; i++) {
2611  s->block_last_index[i] = 0;
2612  s->block[i][0] = s->block[4][0];
2613  }
2614  }
2615  }
2616 
2617  // non c quantize code returns incorrect block_last_index FIXME
2618  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2619  for (i = 0; i < mb_block_count; i++) {
2620  int j;
2621  if (s->block_last_index[i] > 0) {
2622  for (j = 63; j > 0; j--) {
2623  if (s->block[i][s->intra_scantable.permutated[j]])
2624  break;
2625  }
2626  s->block_last_index[i] = j;
2627  }
2628  }
2629  }
2630 
2631  /* huffman encode */
2632  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2636  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2637  break;
2638  case AV_CODEC_ID_MPEG4:
2640  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2641  break;
2642  case AV_CODEC_ID_MSMPEG4V2:
2643  case AV_CODEC_ID_MSMPEG4V3:
2644  case AV_CODEC_ID_WMV1:
2646  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2647  break;
2648  case AV_CODEC_ID_WMV2:
2649  if (CONFIG_WMV2_ENCODER)
2650  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2651  break;
2652  case AV_CODEC_ID_H261:
2653  if (CONFIG_H261_ENCODER)
2654  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2655  break;
2656  case AV_CODEC_ID_H263:
2657  case AV_CODEC_ID_H263P:
2658  case AV_CODEC_ID_FLV1:
2659  case AV_CODEC_ID_RV10:
2660  case AV_CODEC_ID_RV20:
2661  if (CONFIG_H263_ENCODER)
2662  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2663  break;
2664  case AV_CODEC_ID_MJPEG:
2665  case AV_CODEC_ID_AMV:
2667  ff_mjpeg_encode_mb(s, s->block);
2668  break;
2669  default:
2670  av_assert1(0);
2671  }
2672 }
2673 
2674 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2675 {
2676  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2677  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2678  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2679 }
2680 
/**
 * Copy the per-macroblock encoder state from s into d.
 * Used to (re)start a candidate macroblock encode from a known snapshot;
 * the counterpart copy_context_after_encode() commits the winning attempt.
 */
static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
    int i;

    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];  /* DC predictors for Y, Cb, Cr */

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;  /* reset so this attempt's bit count starts from zero */

    d->mb_skipped= 0;
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    /* NOTE(review): one trailing field copy appears to have been lost in
     * extraction here — confirm against upstream source. */
}
2708 
/**
 * Copy the per-macroblock encoder state from s into d.
 * Used to commit the state produced by the best candidate encode (the
 * inverse direction of copy_context_before_encode()); also transfers the
 * bitstream writers and coefficient block pointers of the attempt.
 */
static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
    int i;

    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];  /* DC predictors for Y, Cb, Cr */

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    d->pb= s->pb;  /* hand over the bit writer of the winning attempt */
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    /* NOTE(review): a field copy (likely interlaced_dct) was lost in
     * extraction just above — confirm against upstream source. */
    d->qscale= s->qscale;

    /* NOTE(review): one trailing field copy was lost in extraction here. */
}
2747 
/**
 * Encode the current macroblock with one candidate coding mode and keep the
 * result only if its (rate or rate-distortion) score beats the best so far.
 * Uses double buffering indexed by *next_block so the best attempt's
 * bitstream and reconstruction survive while the next candidate is tried.
 * NOTE(review): the PutBitContext array parameters on the signature's middle
 * line were lost in extraction — confirm against upstream source.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                   int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* start this attempt from the saved pre-encode state */
    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];  /* one bitstream buffer per candidate slot */
    if(s->data_partitioning){
        s->pb2   = pb2   [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* reconstruct into a scratchpad so the best MB's pixels are kept */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    score= put_bits_count(&s->pb);  /* rate of the attempt, in bits */
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* NOTE(review): the MB reconstruction call was lost in extraction
         * here — confirm against upstream source. */

        /* full rate-distortion score: R * lambda2 + D */
        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;  /* keep this slot; encode the next candidate into the other */

        copy_context_after_encode(best, s, type);
    }
}
2798 
2799 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2800  uint32_t *sq = ff_square_tab + 256;
2801  int acc=0;
2802  int x,y;
2803 
2804  if(w==16 && h==16)
2805  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2806  else if(w==8 && h==8)
2807  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2808 
2809  for(y=0; y<h; y++){
2810  for(x=0; x<w; x++){
2811  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2812  }
2813  }
2814 
2815  av_assert2(acc>=0);
2816 
2817  return acc;
2818 }
2819 
2820 static int sse_mb(MpegEncContext *s){
2821  int w= 16;
2822  int h= 16;
2823 
2824  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2825  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2826 
2827  if(w==16 && h==16)
2828  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2829  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2830  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2831  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2832  }else{
2833  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2834  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2835  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2836  }
2837  else
2838  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2839  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2840  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2841 }
2842 
    /* Motion-estimation pre-pass worker (the function signature line was
     * lost in extraction — confirm against upstream source). Scans this
     * slice's macroblocks bottom-up using the pre-pass diamond size. */
    MpegEncContext *s= *(void**)arg;


    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;  /* pre-pass search diamond size */
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            /* NOTE(review): the per-MB pre-estimation call was lost in
             * extraction here — confirm against upstream source. */
        }
        s->first_slice_line=0;
    }

    s->me.pre_pass=0;

    return 0;
}
2861 
    /* Motion-estimation worker (the function signature line was lost in
     * extraction — confirm against upstream source). Estimates motion for
     * every macroblock in this slice's rows. */
    MpegEncContext *s= *(void**)arg;

    /* NOTE(review): a context-update call was lost in extraction here. */

    s->me.dia_size= s->avctx->dia_size;  /* main-pass search diamond size */
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        /* NOTE(review): a block-index init call was lost in extraction here. */
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the per-plane block indices by one macroblock
             * (two luma blocks horizontally) */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            /* NOTE(review): the `if` branch paired with this `else` and both
             * estimation calls were lost in extraction — confirm upstream. */
            else
        }
        s->first_slice_line=0;
    }
    return 0;
}
2888 
2889 static int mb_var_thread(AVCodecContext *c, void *arg){
2890  MpegEncContext *s= *(void**)arg;
2891  int mb_x, mb_y;
2892 
2894 
2895  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2896  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2897  int xx = mb_x * 16;
2898  int yy = mb_y * 16;
2899  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2900  int varc;
2901  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2902 
2903  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2904  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2905 
2906  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2907  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2908  s->me.mb_var_sum_temp += varc;
2909  }
2910  }
2911  return 0;
2912 }
2913 
    /* Terminate the current slice/GOB: codec-specific stuffing, then
     * byte-align the bitstream and account the padding bits.
     * NOTE(review): the function signature, the MPEG-4 dispatch header and
     * the calls inside the partitioned-frame and MJPEG branches were lost in
     * extraction — confirm against upstream source before editing. */
    if(s->partitioned_frame){
    }

    ff_mpeg4_stuffing(&s->pb);  /* MPEG-4 resync stuffing before alignment */
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
    }

    flush_put_bits(&s->pb);  /* byte-align the output */

    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);  /* count stuffing as misc bits for pass-1 stats */
}
2931 
{
    /* Back-fill the most recently reserved 12-byte mb_info slot (see
     * update_mb_info) with the H.263 resync state for this macroblock.
     * NOTE(review): the function signature line was lost in extraction. */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);  /* bit position of this MB in the stream */
    int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);  /* MB address within the GOB */
    int gobn = s->mb_y / s->gob_index;                            /* GOB number */
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);  /* motion-vector predictors */
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2951 
2952 static void update_mb_info(MpegEncContext *s, int startcode)
2953 {
2954  if (!s->mb_info)
2955  return;
2956  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2957  s->mb_info_size += 12;
2958  s->prev_mb_info = s->last_mb_info;
2959  }
2960  if (startcode) {
2961  s->prev_mb_info = put_bits_count(&s->pb)/8;
2962  /* This might have incremented mb_info_size above, and we return without
2963  * actually writing any info into that slot yet. But in that case,
2964  * this will be called again at the start of the after writing the
2965  * start code, actually writing the mb info. */
2966  return;
2967  }
2968 
2969  s->last_mb_info = put_bits_count(&s->pb)/8;
2970  if (!s->mb_info_size)
2971  s->mb_info_size += 12;
2972  write_mb_info(s);
2973 }
2974 
2975 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2976 {
2977  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2978  && s->slice_context_count == 1
2979  && s->pb.buf == s->avctx->internal->byte_buffer) {
2980  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2981  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2982 
2983  uint8_t *new_buffer = NULL;
2984  int new_buffer_size = 0;
2985 
2986  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2987  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2988  return AVERROR(ENOMEM);
2989  }
2990 
2991  emms_c();
2992 
2993  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2994  s->avctx->internal->byte_buffer_size + size_increase);
2995  if (!new_buffer)
2996  return AVERROR(ENOMEM);
2997 
2998  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
3000  s->avctx->internal->byte_buffer = new_buffer;
3001  s->avctx->internal->byte_buffer_size = new_buffer_size;
3002  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
3003  s->ptr_lastgob = s->pb.buf + lastgob_pos;
3004  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
3005  }
3006  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
3007  return AVERROR(EINVAL);
3008  return 0;
3009 }
3010 
3011 static int encode_thread(AVCodecContext *c, void *arg){
3012  MpegEncContext *s= *(void**)arg;
3013  int mb_x, mb_y;
3014  int chr_h= 16>>s->chroma_y_shift;
3015  int i, j;
3016  MpegEncContext best_s = { 0 }, backup_s;
3017  uint8_t bit_buf[2][MAX_MB_BYTES];
3018  uint8_t bit_buf2[2][MAX_MB_BYTES];
3019  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
3020  PutBitContext pb[2], pb2[2], tex_pb[2];
3021 
3023 
3024  for(i=0; i<2; i++){
3025  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3026  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
3027  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
3028  }
3029 
3030  s->last_bits= put_bits_count(&s->pb);
3031  s->mv_bits=0;
3032  s->misc_bits=0;
3033  s->i_tex_bits=0;
3034  s->p_tex_bits=0;
3035  s->i_count=0;
3036  s->f_count=0;
3037  s->b_count=0;
3038  s->skip_count=0;
3039 
3040  for(i=0; i<3; i++){
3041  /* init last dc values */
3042  /* note: quant matrix value (8) is implied here */
3043  s->last_dc[i] = 128 << s->intra_dc_precision;
3044 
3045  s->current_picture.encoding_error[i] = 0;
3046  }
3047  if(s->codec_id==AV_CODEC_ID_AMV){
3048  s->last_dc[0] = 128*8/13;
3049  s->last_dc[1] = 128*8/14;
3050  s->last_dc[2] = 128*8/14;
3051  }
3052  s->mb_skip_run = 0;
3053  memset(s->last_mv, 0, sizeof(s->last_mv));
3054 
3055  s->last_mv_dir = 0;
3056 
3057  switch(s->codec_id){
3058  case AV_CODEC_ID_H263:
3059  case AV_CODEC_ID_H263P:
3060  case AV_CODEC_ID_FLV1:
3061  if (CONFIG_H263_ENCODER)
3062  s->gob_index = H263_GOB_HEIGHT(s->height);
3063  break;
3064  case AV_CODEC_ID_MPEG4:
3067  break;
3068  }
3069 
3070  s->resync_mb_x=0;
3071  s->resync_mb_y=0;
3072  s->first_slice_line = 1;
3073  s->ptr_lastgob = s->pb.buf;
3074  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3075  s->mb_x=0;
3076  s->mb_y= mb_y;
3077 
3078  ff_set_qscale(s, s->qscale);
3080 
3081  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3082  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3083  int mb_type= s->mb_type[xy];
3084 // int d;
3085  int dmin= INT_MAX;
3086  int dir;
3087  int size_increase = s->avctx->internal->byte_buffer_size/4
3088  + s->mb_width*MAX_MB_BYTES;
3089 
3090  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3091  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3092  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3093  return -1;
3094  }
3095  if(s->data_partitioning){
3096  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3097  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3098  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3099  return -1;
3100  }
3101  }
3102 
3103  s->mb_x = mb_x;
3104  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3106 
3109  xy= s->mb_y*s->mb_stride + s->mb_x;
3110  mb_type= s->mb_type[xy];
3111  }
3112 
3113  /* write gob / video packet header */
3114  if(s->rtp_mode){
3115  int current_packet_size, is_gob_start;
3116 
3117  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3118 
3119  is_gob_start = s->rtp_payload_size &&
3120  current_packet_size >= s->rtp_payload_size &&
3121  mb_y + mb_x > 0;
3122 
3123  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3124 
3125  switch(s->codec_id){
3126  case AV_CODEC_ID_H263:
3127  case AV_CODEC_ID_H263P:
3128  if(!s->h263_slice_structured)
3129  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3130  break;
3132  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3134  if(s->mb_skip_run) is_gob_start=0;
3135  break;
3136  case AV_CODEC_ID_MJPEG:
3137  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3138  break;
3139  }
3140 
3141  if(is_gob_start){
3142  if(s->start_mb_y != mb_y || mb_x!=0){
3143  write_slice_end(s);
3144 
3147  }
3148  }
3149 
3150  av_assert2((put_bits_count(&s->pb)&7) == 0);
3151  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3152 
3153  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3154  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3155  int d = 100 / s->error_rate;
3156  if(r % d == 0){
3157  current_packet_size=0;
3158  s->pb.buf_ptr= s->ptr_lastgob;
3159  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3160  }
3161  }
3162 
3163 #if FF_API_RTP_CALLBACK
3165  if (s->avctx->rtp_callback){
3166  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3167  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3168  }
3170 #endif
3171  update_mb_info(s, 1);
3172 
3173  switch(s->codec_id){
3174  case AV_CODEC_ID_MPEG4:
3175  if (CONFIG_MPEG4_ENCODER) {
3178  }
3179  break;
3185  }
3186  break;
3187  case AV_CODEC_ID_H263:
3188  case AV_CODEC_ID_H263P:
3189  if (CONFIG_H263_ENCODER)
3190  ff_h263_encode_gob_header(s, mb_y);
3191  break;
3192  }
3193 
3194  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3195  int bits= put_bits_count(&s->pb);
3196  s->misc_bits+= bits - s->last_bits;
3197  s->last_bits= bits;
3198  }
3199 
3200  s->ptr_lastgob += current_packet_size;
3201  s->first_slice_line=1;
3202  s->resync_mb_x=mb_x;
3203  s->resync_mb_y=mb_y;
3204  }
3205  }
3206 
3207  if( (s->resync_mb_x == s->mb_x)
3208  && s->resync_mb_y+1 == s->mb_y){
3209  s->first_slice_line=0;
3210  }
3211 
3212  s->mb_skipped=0;
3213  s->dquant=0; //only for QP_RD
3214 
3215  update_mb_info(s, 0);
3216 
3217  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3218  int next_block=0;
3219  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3220 
3221  copy_context_before_encode(&backup_s, s, -1);
3222  backup_s.pb= s->pb;
3225  if(s->data_partitioning){
3226  backup_s.pb2= s->pb2;
3227  backup_s.tex_pb= s->tex_pb;
3228  }
3229 
3230  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3231  s->mv_dir = MV_DIR_FORWARD;
3232  s->mv_type = MV_TYPE_16X16;
3233  s->mb_intra= 0;
3234  s->mv[0][0][0] = s->p_mv_table[xy][0];
3235  s->mv[0][0][1] = s->p_mv_table[xy][1];
3236  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3237  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3238  }
3239  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3240  s->mv_dir = MV_DIR_FORWARD;
3241  s->mv_type = MV_TYPE_FIELD;
3242  s->mb_intra= 0;
3243  for(i=0; i<2; i++){
3244  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3245  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3246  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3247  }
3248  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3249  &dmin, &next_block, 0, 0);
3250  }
3251  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3252  s->mv_dir = MV_DIR_FORWARD;
3253  s->mv_type = MV_TYPE_16X16;
3254  s->mb_intra= 0;
3255  s->mv[0][0][0] = 0;
3256  s->mv[0][0][1] = 0;
3257  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3258  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3259  }
3260  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3261  s->mv_dir = MV_DIR_FORWARD;
3262  s->mv_type = MV_TYPE_8X8;
3263  s->mb_intra= 0;
3264  for(i=0; i<4; i++){
3265  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3266  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3267  }
3268  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3269  &dmin, &next_block, 0, 0);
3270  }
3271  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3272  s->mv_dir = MV_DIR_FORWARD;
3273  s->mv_type = MV_TYPE_16X16;
3274  s->mb_intra= 0;
3275  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3276  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3277  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3278  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3279  }
3280  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3281  s->mv_dir = MV_DIR_BACKWARD;
3282  s->mv_type = MV_TYPE_16X16;
3283  s->mb_intra= 0;
3284  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3285  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3286  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3287  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3288  }
3289  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3291  s->mv_type = MV_TYPE_16X16;
3292  s->mb_intra= 0;
3293  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3294  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3295  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3296  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3297  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3298  &dmin, &next_block, 0, 0);
3299  }
3300  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3301  s->mv_dir = MV_DIR_FORWARD;
3302  s->mv_type = MV_TYPE_FIELD;
3303  s->mb_intra= 0;
3304  for(i=0; i<2; i++){
3305  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3306  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3307  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3308  }
3309  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3310  &dmin, &next_block, 0, 0);
3311  }
3312  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3313  s->mv_dir = MV_DIR_BACKWARD;
3314  s->mv_type = MV_TYPE_FIELD;
3315  s->mb_intra= 0;
3316  for(i=0; i<2; i++){
3317  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3318  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3319  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3320  }
3321  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3322  &dmin, &next_block, 0, 0);
3323  }
3324  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3326  s->mv_type = MV_TYPE_FIELD;
3327  s->mb_intra= 0;
3328  for(dir=0; dir<2; dir++){
3329  for(i=0; i<2; i++){
3330  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3331  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3332  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3333  }
3334  }
3335  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3336  &dmin, &next_block, 0, 0);
3337  }
3338  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3339  s->mv_dir = 0;
3340  s->mv_type = MV_TYPE_16X16;
3341  s->mb_intra= 1;
3342  s->mv[0][0][0] = 0;
3343  s->mv[0][0][1] = 0;
3344  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3345  &dmin, &next_block, 0, 0);
3346  if(s->h263_pred || s->h263_aic){
3347  if(best_s.mb_intra)
3348  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3349  else
3350  ff_clean_intra_table_entries(s); //old mode?
3351  }
3352  }
3353 
3354  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3355  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3356  const int last_qp= backup_s.qscale;
3357  int qpi, qp, dc[6];
3358  int16_t ac[6][16];
3359  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3360  static const int dquant_tab[4]={-1,1,-2,2};
3361  int storecoefs = s->mb_intra && s->dc_val[0];
3362 
3363  av_assert2(backup_s.dquant == 0);
3364 
3365  //FIXME intra
3366  s->mv_dir= best_s.mv_dir;
3367  s->mv_type = MV_TYPE_16X16;
3368  s->mb_intra= best_s.mb_intra;
3369  s->mv[0][0][0] = best_s.mv[0][0][0];
3370  s->mv[0][0][1] = best_s.mv[0][0][1];
3371  s->mv[1][0][0] = best_s.mv[1][0][0];
3372  s->mv[1][0][1] = best_s.mv[1][0][1];
3373 
3374  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3375  for(; qpi<4; qpi++){
3376  int dquant= dquant_tab[qpi];
3377  qp= last_qp + dquant;
3378  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3379  continue;
3380  backup_s.dquant= dquant;
3381  if(storecoefs){
3382  for(i=0; i<6; i++){
3383  dc[i]= s->dc_val[0][ s->block_index[i] ];
3384  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3385  }
3386  }
3387 
3388  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3389  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3390  if(best_s.qscale != qp){
3391  if(storecoefs){
3392  for(i=0; i<6; i++){
3393  s->dc_val[0][ s->block_index[i] ]= dc[i];
3394  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3395  }
3396  }
3397  }
3398  }
3399  }
3400  }
3402  int mx= s->b_direct_mv_table[xy][0];
3403  int my= s->b_direct_mv_table[xy][1];
3404 
3405  backup_s.dquant = 0;
3407  s->mb_intra= 0;
3408  ff_mpeg4_set_direct_mv(s, mx, my);
3409  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3410  &dmin, &next_block, mx, my);
3411  }
3413  backup_s.dquant = 0;
3415  s->mb_intra= 0;
3416  ff_mpeg4_set_direct_mv(s, 0, 0);
3417  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3418  &dmin, &next_block, 0, 0);
3419  }
3420  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3421  int coded=0;
3422  for(i=0; i<6; i++)
3423  coded |= s->block_last_index[i];
3424  if(coded){
3425  int mx,my;
3426  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3427  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3428  mx=my=0; //FIXME find the one we actually used
3429  ff_mpeg4_set_direct_mv(s, mx, my);
3430  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3431  mx= s->mv[1][0][0];
3432  my= s->mv[1][0][1];
3433  }else{
3434  mx= s->mv[0][0][0];
3435  my= s->mv[0][0][1];
3436  }
3437 
3438  s->mv_dir= best_s.mv_dir;
3439  s->mv_type = best_s.mv_type;
3440  s->mb_intra= 0;
3441 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3442  s->mv[0][0][1] = best_s.mv[0][0][1];
3443  s->mv[1][0][0] = best_s.mv[1][0][0];
3444  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3445  backup_s.dquant= 0;
3446  s->skipdct=1;
3447  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3448  &dmin, &next_block, mx, my);
3449  s->skipdct=0;
3450  }
3451  }
3452 
3453  s->current_picture.qscale_table[xy] = best_s.qscale;
3454 
3455  copy_context_after_encode(s, &best_s, -1);
3456 
3457  pb_bits_count= put_bits_count(&s->pb);
3458  flush_put_bits(&s->pb);
3459  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3460  s->pb= backup_s.pb;
3461 
3462  if(s->data_partitioning){
3463  pb2_bits_count= put_bits_count(&s->pb2);
3464  flush_put_bits(&s->pb2);
3465  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3466  s->pb2= backup_s.pb2;
3467 
3468  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3469  flush_put_bits(&s->tex_pb);
3470  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3471  s->tex_pb= backup_s.tex_pb;
3472  }
3473  s->last_bits= put_bits_count(&s->pb);
3474 
3475  if (CONFIG_H263_ENCODER &&
3478 
3479  if(next_block==0){ //FIXME 16 vs linesize16
3480  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3481  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3482  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3483  }
3484 
3487  } else {
3488  int motion_x = 0, motion_y = 0;
3490  // only one MB-Type possible
3491 
3492  switch(mb_type){
3494  s->mv_dir = 0;
3495  s->mb_intra= 1;
3496  motion_x= s->mv[0][0][0] = 0;
3497  motion_y= s->mv[0][0][1] = 0;
3498  break;
3500  s->mv_dir = MV_DIR_FORWARD;
3501  s->mb_intra= 0;
3502  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3503  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3504  break;
3506  s->mv_dir = MV_DIR_FORWARD;
3507  s->mv_type = MV_TYPE_FIELD;
3508  s->mb_intra= 0;
3509  for(i=0; i<2; i++){
3510  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3511  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3512  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3513  }
3514  break;
3516  s->mv_dir = MV_DIR_FORWARD;
3517  s->mv_type = MV_TYPE_8X8;
3518  s->mb_intra= 0;
3519  for(i=0; i<4; i++){
3520  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3521  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3522  }
3523  break;
3525  if (CONFIG_MPEG4_ENCODER) {
3527  s->mb_intra= 0;
3528  motion_x=s->b_direct_mv_table[xy][0];
3529  motion_y=s->b_direct_mv_table[xy][1];
3530  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3531  }
3532  break;
3534  if (CONFIG_MPEG4_ENCODER) {
3536  s->mb_intra= 0;
3537  ff_mpeg4_set_direct_mv(s, 0, 0);
3538  }
3539  break;
3542  s->mb_intra= 0;
3543  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3544  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3545  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3546  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3547  break;
3549  s->mv_dir = MV_DIR_BACKWARD;
3550  s->mb_intra= 0;
3551  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3552  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3553  break;
3555  s->mv_dir = MV_DIR_FORWARD;
3556  s->mb_intra= 0;
3557  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3558  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3559  break;
3561  s->mv_dir = MV_DIR_FORWARD;
3562  s->mv_type = MV_TYPE_FIELD;
3563  s->mb_intra= 0;
3564  for(i=0; i<2; i++){
3565  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3566  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3567  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3568  }
3569  break;
3571  s->mv_dir = MV_DIR_BACKWARD;
3572  s->mv_type = MV_TYPE_FIELD;
3573  s->mb_intra= 0;
3574  for(i=0; i<2; i++){
3575  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3576  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3577  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3578  }
3579  break;
3582  s->mv_type = MV_TYPE_FIELD;
3583  s->mb_intra= 0;
3584  for(dir=0; dir<2; dir++){
3585  for(i=0; i<2; i++){
3586  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3587  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3588  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3589  }
3590  }
3591  break;
3592  default:
3593  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3594  }
3595 
3596  encode_mb(s, motion_x, motion_y);
3597 
3598  // RAL: Update last macroblock type
3599  s->last_mv_dir = s->mv_dir;
3600 
3601  if (CONFIG_H263_ENCODER &&
3604 
3606  }
3607 
3608  /* clean the MV table in IPS frames for direct mode in B-frames */
3609  if(s->mb_intra /* && I,P,S_TYPE */){
3610  s->p_mv_table[xy][0]=0;
3611  s->p_mv_table[xy][1]=0;
3612  }
3613 
3614  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3615  int w= 16;
3616  int h= 16;
3617 
3618  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3619  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3620 
3622  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3623  s->dest[0], w, h, s->linesize);
3625  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3626  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3628  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3629  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3630  }
3631  if(s->loop_filter){
3634  }
3635  ff_dlog(s->avctx, "MB %d %d bits\n",
3636  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3637  }
3638  }
3639 
3640  //not beautiful here but we must write it before flushing so it has to be here
3643 
3644  write_slice_end(s);
3645 
3646 #if FF_API_RTP_CALLBACK
3648  /* Send the last GOB if RTP */
3649  if (s->avctx->rtp_callback) {
3650  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3651  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3652  /* Call the RTP callback to send the last GOB */
3653  emms_c();
3654  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3655  }
3657 #endif
3658 
3659  return 0;
3660 }
3661 
3662 #define MERGE(field) dst->field += src->field; src->field=0
3664  MERGE(me.scene_change_score);
3665  MERGE(me.mc_mb_var_sum_temp);
3666  MERGE(me.mb_var_sum_temp);
3667 }
3668 
3670  int i;
3671 
3672  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3673  MERGE(dct_count[1]);
3674  MERGE(mv_bits);
3675  MERGE(i_tex_bits);
3676  MERGE(p_tex_bits);
3677  MERGE(i_count);
3678  MERGE(f_count);
3679  MERGE(b_count);
3680  MERGE(skip_count);
3681  MERGE(misc_bits);
3682  MERGE(er.error_count);
3687 
3688  if (dst->noise_reduction){
3689  for(i=0; i<64; i++){
3690  MERGE(dct_error_sum[0][i]);
3691  MERGE(dct_error_sum[1][i]);
3692  }
3693  }
3694 
3695  assert(put_bits_count(&src->pb) % 8 ==0);
3696  assert(put_bits_count(&dst->pb) % 8 ==0);
3697  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3698  flush_put_bits(&dst->pb);
3699 }
3700 
/**
 * Choose the quantiser/lambda for the upcoming picture.
 *
 * Sources, in priority order: a pre-computed s->next_lambda, the rate
 * controller (Xvid-based under CONFIG_LIBXVID, otherwise the native
 * ff_rate_estimate_qscale()), or the picture quality already set by the
 * caller (fixed-qscale mode).  With adaptive quantisation enabled, a
 * per-codec per-MB qscale table is initialised and lambda is seeded from
 * its first entry.
 *
 * @param s       encoder context
 * @param dry_run if nonzero, only estimate; do not consume next_lambda
 * @return 0 on success, -1 if the estimated quality is negative
 */
static int estimate_qp(MpegEncContext *s, int dry_run){
    if (s->next_lambda){
        /* NOTE(review): the statement(s) that copy next_lambda into the
         * picture quality appear to be missing from this extract (source
         * numbering jumps 3702 -> 3705) -- verify against the pristine file. */
        if(!dry_run) s->next_lambda= 0;
    } else if (!s->fixed_qscale) {
        int quality;
#if CONFIG_LIBXVID
        /* NOTE(review): the condition guarding the Xvid-based estimator is
         * missing from this extract (numbering jumps 3708 -> 3710). */
        quality = ff_xvid_rate_estimate_qscale(s, dry_run);
        else
#endif
        quality = ff_rate_estimate_qscale(s, dry_run);
        s->current_picture.f->quality = quality;
        if (s->current_picture.f->quality < 0)
            return -1;   /* rate controller failed */
    }

    if(s->adaptive_quant){
        switch(s->codec_id){
        case AV_CODEC_ID_MPEG4:
            /* NOTE(review): the MPEG-4 qscale fixup call is missing from
             * this extract (numbering jumps 3722 -> 3725). */
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
            /* NOTE(review): the guarded H.263 qscale fixup call is missing
             * from this extract (numbering jumps 3729 -> 3731). */
            break;
        default:
            ff_init_qscale_tab(s);
        }

        /* seed lambda from the first MB entry; per-MB table drives the rest */
        s->lambda= s->lambda_table[0];
        //FIXME broken
    }else
        s->lambda = s->current_picture.f->quality;
    update_qscale(s);
    return 0;
}
3743 
3744 /* must be called before writing the header */
3747  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3748 
3749  if(s->pict_type==AV_PICTURE_TYPE_B){
3750  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3751  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3752  }else{
3753  s->pp_time= s->time - s->last_non_b_time;
3754  s->last_non_b_time= s->time;
3755  assert(s->picture_number==0 || s->pp_time > 0);
3756  }
3757 }
3758 
3760 {
3761  int i, ret;
3762  int bits;
3763  int context_count = s->slice_context_count;
3764 
3766 
3767  /* Reset the average MB variance */
3768  s->me.mb_var_sum_temp =
3769  s->me.mc_mb_var_sum_temp = 0;
3770 
3771  /* we need to initialize some time vars before we can encode B-frames */
3772  // RAL: Condition added for MPEG1VIDEO
3776  ff_set_mpeg4_time(s);
3777 
3778  s->me.scene_change_score=0;
3779 
3780 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3781 
3782  if(s->pict_type==AV_PICTURE_TYPE_I){
3783  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3784  else s->no_rounding=0;
3785  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3787  s->no_rounding ^= 1;
3788  }
3789 
3790  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3791  if (estimate_qp(s,1) < 0)
3792  return -1;
3793  ff_get_2pass_fcode(s);
3794  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3796  s->lambda= s->last_lambda_for[s->pict_type];
3797  else
3799  update_qscale(s);
3800  }
3801 
3807  }
3808 
3809  s->mb_intra=0; //for the rate distortion & bit compare functions
3810  for(i=1; i<context_count; i++){
3812  if (ret < 0)
3813  return ret;
3814  }
3815 
3816  if(ff_init_me(s)<0)
3817  return -1;
3818 
3819  /* Estimate motion for every MB */
3820  if(s->pict_type != AV_PICTURE_TYPE_I){
3821  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3822  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3823  if (s->pict_type != AV_PICTURE_TYPE_B) {
3824  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3825  s->me_pre == 2) {
3826  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3827  }
3828  }
3829 
3830  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3831  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3832  /* I-Frame */
3833  for(i=0; i<s->mb_stride*s->mb_height; i++)
3835 
3836  if(!s->fixed_qscale){
3837  /* finding spatial complexity for I-frame rate control */
3838  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3839  }
3840  }
3841  for(i=1; i<context_count; i++){
3843  }
3845  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3846  emms_c();
3847 
3849  s->pict_type == AV_PICTURE_TYPE_P) {
3851  for(i=0; i<s->mb_stride*s->mb_height; i++)
3853  if(s->msmpeg4_version >= 3)
3854  s->no_rounding=1;
3855  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3857  }
3858 
3859  if(!s->umvplus){
3862 
3864  int a,b;
3865  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3867  s->f_code= FFMAX3(s->f_code, a, b);
3868  }
3869 
3870  ff_fix_long_p_mvs(s);
3873  int j;
3874  for(i=0; i<2; i++){
3875  for(j=0; j<2; j++)
3878  }
3879  }
3880  }
3881 
3882  if(s->pict_type==AV_PICTURE_TYPE_B){
3883  int a, b;
3884 
3887  s->f_code = FFMAX(a, b);
3888 
3891  s->b_code = FFMAX(a, b);
3892 
3898  int dir, j;
3899  for(dir=0; dir<2; dir++){
3900  for(i=0; i<2; i++){
3901  for(j=0; j<2; j++){
3904  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3905  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3906  }
3907  }
3908  }
3909  }
3910  }
3911  }
3912 
3913  if (estimate_qp(s, 0) < 0)
3914  return -1;
3915 
3916  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3917  s->pict_type == AV_PICTURE_TYPE_I &&
3918  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3919  s->qscale= 3; //reduce clipping problems
3920 
3921  if (s->out_format == FMT_MJPEG) {
3922  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3923  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3924 
3925  if (s->avctx->intra_matrix) {
3926  chroma_matrix =
3927  luma_matrix = s->avctx->intra_matrix;
3928  }
3929  if (s->avctx->chroma_intra_matrix)
3930  chroma_matrix = s->avctx->chroma_intra_matrix;
3931 
3932  /* for mjpeg, we do include qscale in the matrix */
3933  for(i=1;i<64;i++){
3934  int j = s->idsp.idct_permutation[i];
3935 
3936  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3937  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3938  }
3939  s->y_dc_scale_table=
3941  s->chroma_intra_matrix[0] =
3944  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3946  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3947  s->qscale= 8;
3948  }
3949  if(s->codec_id == AV_CODEC_ID_AMV){
3950  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3951  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3952  for(i=1;i<64;i++){
3953  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3954 
3955  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3956  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3957  }
3958  s->y_dc_scale_table= y;
3959  s->c_dc_scale_table= c;
3960  s->intra_matrix[0] = 13;
3961  s->chroma_intra_matrix[0] = 14;
3963  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3965  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3966  s->qscale= 8;
3967  }
3968 
3969  //FIXME var duplication
3971  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3974 
3975  if (s->current_picture.f->key_frame)
3976  s->picture_in_gop_number=0;
3977 
3978  s->mb_x = s->mb_y = 0;
3979  s->last_bits= put_bits_count(&s->pb);
3980  switch(s->out_format) {
3981  case FMT_MJPEG:
3985  break;
3986  case FMT_H261:
3987  if (CONFIG_H261_ENCODER)
3988  ff_h261_encode_picture_header(s, picture_number);
3989  break;
3990  case FMT_H263:
3992  ff_wmv2_encode_picture_header(s, picture_number);
3993  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3994  ff_msmpeg4_encode_picture_header(s, picture_number);
3995  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3996  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3997  if (ret < 0)
3998  return ret;
3999  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
4000  ret = ff_rv10_encode_picture_header(s, picture_number);
4001  if (ret < 0)
4002  return ret;
4003  }
4004  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
4005  ff_rv20_encode_picture_header(s, picture_number);
4006  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
4007  ff_flv_encode_picture_header(s, picture_number);
4008  else if (CONFIG_H263_ENCODER)
4009  ff_h263_encode_picture_header(s, picture_number);
4010  break;
4011  case FMT_MPEG1:
4013  ff_mpeg1_encode_picture_header(s, picture_number);
4014  break;
4015  default:
4016  av_assert0(0);
4017  }
4018  bits= put_bits_count(&s->pb);
4019  s->header_bits= bits - s->last_bits;
4020 
4021  for(i=1; i<context_count; i++){
4023  }
4024  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
4025  for(i=1; i<context_count; i++){
4026  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
4027  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
4029  }
4030  emms_c();
4031  return 0;
4032 }
4033 
4034 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
4035  const int intra= s->mb_intra;
4036  int i;
4037 
4038  s->dct_count[intra]++;
4039 
4040  for(i=0; i<64; i++){
4041  int level= block[i];
4042 
4043  if(level){
4044  if(level>0){
4045  s->dct_error_sum[intra][i] += level;
4046  level -= s->dct_offset[intra][i];
4047  if(level<0) level=0;
4048  }else{
4049  s->dct_error_sum[intra][i] -= level;
4050  level += s->dct_offset[intra][i];
4051  if(level>0) level=0;
4052  }
4053  block[i]= level;
4054  }
4055  }
4056 }
4057 
4059  int16_t *block, int n,
4060  int qscale, int *overflow){
4061  const int *qmat;
4062  const uint16_t *matrix;
4063  const uint8_t *scantable;
4064  const uint8_t *perm_scantable;
4065  int max=0;
4066  unsigned int threshold1, threshold2;
4067  int bias=0;
4068  int run_tab[65];
4069  int level_tab[65];
4070  int score_tab[65];
4071  int survivor[65];
4072  int survivor_count;
4073  int last_run=0;
4074  int last_level=0;
4075  int last_score= 0;
4076  int last_i;
4077  int coeff[2][64];
4078  int coeff_count[64];
4079  int qmul, qadd, start_i, last_non_zero, i, dc;
4080  const int esc_length= s->ac_esc_length;
4081  uint8_t * length;
4082  uint8_t * last_length;
4083  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4084  int mpeg2_qscale;
4085 
4086  s->fdsp.fdct(block);
4087 
4088  if(s->dct_error_sum)
4089  s->denoise_dct(s, block);
4090  qmul= qscale*16;
4091  qadd= ((qscale-1)|1)*8;
4092 
4093  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4094  else mpeg2_qscale = qscale << 1;
4095 
4096  if (s->mb_intra) {
4097  int q;
4098  scantable= s->intra_scantable.scantable;
4099  perm_scantable= s->intra_scantable.permutated;
4100  if (!s->h263_aic) {
4101  if (n < 4)
4102  q = s->y_dc_scale;
4103  else
4104  q = s->c_dc_scale;
4105  q = q << 3;
4106  } else{
4107  /* For AIC we skip quant/dequant of INTRADC */
4108  q = 1 << 3;
4109  qadd=0;
4110  }
4111 
4112  /* note: block[0] is assumed to be positive */
4113  block[0] = (block[0] + (q >> 1)) / q;
4114  start_i = 1;
4115  last_non_zero = 0;
4116  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4117  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4118  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4119  bias= 1<<(QMAT_SHIFT-1);
4120 
4121  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4122  length = s->intra_chroma_ac_vlc_length;
4123  last_length= s->intra_chroma_ac_vlc_last_length;
4124  } else {
4125  length = s->intra_ac_vlc_length;
4126  last_length= s->intra_ac_vlc_last_length;
4127  }
4128  } else {
4129  scantable= s->inter_scantable.scantable;
4130  perm_scantable= s->inter_scantable.permutated;
4131  start_i = 0;
4132  last_non_zero = -1;
4133  qmat = s->q_inter_matrix[qscale];
4134  matrix = s->inter_matrix;
4135  length = s->inter_ac_vlc_length;
4136  last_length= s->inter_ac_vlc_last_length;
4137  }
4138  last_i= start_i;
4139 
4140  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4141  threshold2= (threshold1<<1);
4142 
4143  for(i=63; i>=start_i; i--) {
4144  const int j = scantable[i];
4145  int level = block[j] * qmat[j];
4146 
4147  if(((unsigned)(level+threshold1))>threshold2){
4148  last_non_zero = i;
4149  break;
4150  }
4151  }
4152 
4153  for(i=start_i; i<=last_non_zero; i++) {
4154  const int j = scantable[i];
4155  int level = block[j] * qmat[j];
4156 
4157 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4158 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4159  if(((unsigned)(level+threshold1))>threshold2){
4160  if(level>0){
4161  level= (bias + level)>>QMAT_SHIFT;
4162  coeff[0][i]= level;
4163  coeff[1][i]= level-1;
4164 // coeff[2][k]= level-2;
4165  }else{
4166  level= (bias - level)>>QMAT_SHIFT;
4167  coeff[0][i]= -level;
4168  coeff[1][i]= -level+1;
4169 // coeff[2][k]= -level+2;
4170  }
4171  coeff_count[i]= FFMIN(level, 2);
4172  av_assert2(coeff_count[i]);
4173  max |=level;
4174  }else{
4175  coeff[0][i]= (level>>31)|1;
4176  coeff_count[i]= 1;
4177  }
4178  }
4179 
4180  *overflow= s->max_qcoeff < max; //overflow might have happened
4181 
4182  if(last_non_zero < start_i){
4183  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4184  return last_non_zero;
4185  }
4186 
4187  score_tab[start_i]= 0;
4188  survivor[0]= start_i;
4189  survivor_count= 1;
4190 
4191  for(i=start_i; i<=last_non_zero; i++){
4192  int level_index, j, zero_distortion;
4193  int dct_coeff= FFABS(block[ scantable[i] ]);
4194  int best_score=256*256*256*120;
4195 
4196  if (s->fdsp.fdct == ff_fdct_ifast)
4197  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4198  zero_distortion= dct_coeff*dct_coeff;
4199 
4200  for(level_index=0; level_index < coeff_count[i]; level_index++){
4201  int distortion;
4202  int level= coeff[level_index][i];
4203  const int alevel= FFABS(level);
4204  int unquant_coeff;
4205 
4206  av_assert2(level);
4207 
4208  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4209  unquant_coeff= alevel*qmul + qadd;
4210  } else if(s->out_format == FMT_MJPEG) {
4211  j = s->idsp.idct_permutation[scantable[i]];
4212  unquant_coeff = alevel * matrix[j] * 8;
4213  }else{ // MPEG-1
4214  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4215  if(s->mb_intra){
4216  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4217  unquant_coeff = (unquant_coeff - 1) | 1;
4218  }else{
4219  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4220  unquant_coeff = (unquant_coeff - 1) | 1;
4221  }
4222  unquant_coeff<<= 3;
4223  }
4224 
4225  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4226  level+=64;
4227  if((level&(~127)) == 0){
4228  for(j=survivor_count-1; j>=0; j--){
4229  int run= i - survivor[j];
4230  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4231  score += score_tab[i-run];
4232 
4233  if(score < best_score){
4234  best_score= score;
4235  run_tab[i+1]= run;
4236  level_tab[i+1]= level-64;
4237  }
4238  }
4239 
4240  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4241  for(j=survivor_count-1; j>=0; j--){
4242  int run= i - survivor[j];
4243  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4244  score += score_tab[i-run];
4245  if(score < last_score){
4246  last_score= score;
4247  last_run= run;
4248  last_level= level-64;
4249  last_i= i+1;
4250  }
4251  }
4252  }
4253  }else{
4254  distortion += esc_length*lambda;
4255  for(j=survivor_count-1; j>=0; j--){
4256  int run= i - survivor[j];
4257  int score= distortion + score_tab[i-run];
4258 
4259  if(score < best_score){
4260  best_score= score;
4261  run_tab[i+1]= run;
4262  level_tab[i+1]= level-64;
4263  }
4264  }
4265 
4266  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4267  for(j=survivor_count-1; j>=0; j--){
4268  int run= i - survivor[j];
4269  int score= distortion + score_tab[i-run];
4270  if(score < last_score){
4271  last_score= score;
4272  last_run= run;
4273  last_level= level-64;
4274  last_i= i+1;
4275  }
4276  }
4277  }
4278  }
4279  }
4280 
4281  score_tab[i+1]= best_score;
4282 
4283  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4284  if(last_non_zero <= 27){
4285  for(; survivor_count; survivor_count--){
4286  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4287  break;
4288  }
4289  }else{
4290  for(; survivor_count; survivor_count--){
4291  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4292  break;
4293  }
4294  }
4295 
4296  survivor[ survivor_count++ ]= i+1;
4297  }
4298 
4299  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4300  last_score= 256*256*256*120;
4301  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4302  int score= score_tab[i];
4303  if (i)
4304  score += lambda * 2; // FIXME more exact?
4305 
4306  if(score < last_score){
4307  last_score= score;
4308  last_i= i;
4309  last_level= level_tab[i];
4310  last_run= run_tab[i];
4311  }
4312  }
4313  }
4314 
4315  s->coded_score[n] = last_score;
4316 
4317  dc= FFABS(block[0]);
4318  last_non_zero= last_i - 1;
4319  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4320 
4321  if(last_non_zero < start_i)
4322  return last_non_zero;
4323 
4324  if(last_non_zero == 0 && start_i == 0){
4325  int best_level= 0;
4326  int best_score= dc * dc;
4327 
4328  for(i=0; i<coeff_count[0]; i++){
4329  int level= coeff[i][0];
4330  int alevel= FFABS(level);
4331  int unquant_coeff, score, distortion;
4332 
4333  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4334  unquant_coeff= (alevel*qmul + qadd)>>3;
4335  } else{ // MPEG-1
4336  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4337  unquant_coeff = (unquant_coeff - 1) | 1;
4338  }
4339  unquant_coeff = (unquant_coeff + 4) >> 3;
4340  unquant_coeff<<= 3 + 3;
4341 
4342  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4343  level+=64;
4344  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4345  else score= distortion + esc_length*lambda;
4346 
4347  if(score < best_score){
4348  best_score= score;
4349  best_level= level - 64;
4350  }
4351  }
4352  block[0]= best_level;
4353  s->coded_score[n] = best_score - dc*dc;
4354  if(best_level == 0) return -1;
4355  else return last_non_zero;
4356  }
4357 
4358  i= last_i;
4359  av_assert2(last_level);
4360 
4361  block[ perm_scantable[last_non_zero] ]= last_level;
4362  i -= last_run + 1;
4363 
4364  for(; i>start_i; i -= run_tab[i] + 1){
4365  block[ perm_scantable[i-1] ]= level_tab[i];
4366  }
4367 
4368  return last_non_zero;
4369 }
4370 
4371 //#define REFINE_STATS 1
4372 static int16_t basis[64][64];
4373 
4374 static void build_basis(uint8_t *perm){
4375  int i, j, x, y;
4376  emms_c();
4377  for(i=0; i<8; i++){
4378  for(j=0; j<8; j++){
4379  for(y=0; y<8; y++){
4380  for(x=0; x<8; x++){
4381  double s= 0.25*(1<<BASIS_SHIFT);
4382  int index= 8*i + j;
4383  int perm_index= perm[index];
4384  if(i==0) s*= sqrt(0.5);
4385  if(j==0) s*= sqrt(0.5);
4386  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4387  }
4388  }
4389  }
4390  }
4391 }
4392 
4393 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4394  int16_t *block, int16_t *weight, int16_t *orig,
4395  int n, int qscale){
4396  int16_t rem[64];
4397  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4398  const uint8_t *scantable;
4399  const uint8_t *perm_scantable;
4400 // unsigned int threshold1, threshold2;
4401 // int bias=0;
4402  int run_tab[65];
4403  int prev_run=0;
4404  int prev_level=0;
4405  int qmul, qadd, start_i, last_non_zero, i, dc;
4406  uint8_t * length;
4407  uint8_t * last_length;
4408  int lambda;
4409  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4410 #ifdef REFINE_STATS
4411 static int count=0;
4412 static int after_last=0;
4413 static int to_zero=0;
4414 static int from_zero=0;
4415 static int raise=0;
4416 static int lower=0;
4417 static int messed_sign=0;
4418 #endif
4419 
4420  if(basis[0][0] == 0)
4422 
4423  qmul= qscale*2;
4424  qadd= (qscale-1)|1;
4425  if (s->mb_intra) {
4426  scantable= s->intra_scantable.scantable;
4427  perm_scantable= s->intra_scantable.permutated;
4428  if (!s->h263_aic) {
4429  if (n < 4)
4430  q = s->y_dc_scale;
4431  else
4432  q = s->c_dc_scale;
4433  } else{
4434  /* For AIC we skip quant/dequant of INTRADC */
4435  q = 1;
4436  qadd=0;
4437  }
4438  q <<= RECON_SHIFT-3;
4439  /* note: block[0] is assumed to be positive */
4440  dc= block[0]*q;
4441 // block[0] = (block[0] + (q >> 1)) / q;
4442  start_i = 1;
4443 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4444 // bias= 1<<(QMAT_SHIFT-1);
4445  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4446  length = s->intra_chroma_ac_vlc_length;
4447  last_length= s->intra_chroma_ac_vlc_last_length;
4448  } else {
4449  length = s->intra_ac_vlc_length;
4450  last_length= s->intra_ac_vlc_last_length;
4451  }
4452  } else {
4453  scantable= s->inter_scantable.scantable;
4454  perm_scantable= s->inter_scantable.permutated;
4455  dc= 0;
4456  start_i = 0;
4457  length = s->inter_ac_vlc_length;
4458  last_length= s->inter_ac_vlc_last_length;
4459  }
4460  last_non_zero = s->block_last_index[n];
4461 
4462 #ifdef REFINE_STATS
4463 {START_TIMER
4464 #endif
4465  dc += (1<<(RECON_SHIFT-1));
4466  for(i=0; i<64; i++){
4467  rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4468  }
4469 #ifdef REFINE_STATS
4470 STOP_TIMER("memset rem[]")}
4471 #endif
4472  sum=0;
4473  for(i=0; i<64; i++){
4474  int one= 36;
4475  int qns=4;
4476  int w;
4477 
4478  w= FFABS(weight[i]) + qns*one;
4479  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4480 
4481  weight[i] = w;
4482 // w=weight[i] = (63*qns + (w/2)) / w;
4483 
4484  av_assert2(w>0);
4485  av_assert2(w<(1<<6));
4486  sum += w*w;
4487  }
4488  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4489 #ifdef REFINE_STATS
4490 {START_TIMER
4491 #endif
4492  run=0;
4493  rle_index=0;
4494  for(i=start_i; i<=last_non_zero; i++){
4495  int j= perm_scantable[i];
4496  const int level= block[j];
4497  int coeff;
4498 
4499  if(level){
4500  if(level<0) coeff= qmul*level - qadd;
4501  else coeff= qmul*level + qadd;
4502  run_tab[rle_index++]=run;
4503  run=0;
4504 
4505  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4506  }else{
4507  run++;
4508  }
4509  }
4510 #ifdef REFINE_STATS
4511 if(last_non_zero>0){
4512 STOP_TIMER("init rem[]")
4513 }
4514 }
4515 
4516 {START_TIMER
4517 #endif
4518  for(;;){
4519  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4520  int best_coeff=0;
4521  int best_change=0;
4522  int run2, best_unquant_change=0, analyze_gradient;
4523 #ifdef REFINE_STATS
4524 {START_TIMER
4525 #endif
4526  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4527 
4528  if(analyze_gradient){
4529 #ifdef REFINE_STATS
4530 {START_TIMER
4531 #endif
4532  for(i=0; i<64; i++){
4533  int w= weight[i];
4534 
4535  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4536  }
4537 #ifdef REFINE_STATS
4538 STOP_TIMER("rem*w*w")}
4539 {START_TIMER
4540 #endif
4541  s->fdsp.fdct(d1);
4542 #ifdef REFINE_STATS
4543 STOP_TIMER("dct")}
4544 #endif
4545  }
4546 
4547  if(start_i){
4548  const int level= block[0];
4549  int change, old_coeff;
4550 
4551  av_assert2(s->mb_intra);
4552 
4553  old_coeff= q*level;
4554 
4555  for(change=-1; change<=1; change+=2){
4556  int new_level= level + change;
4557  int score, new_coeff;
4558 
4559  new_coeff= q*new_level;
4560  if(new_coeff >= 2048 || new_coeff < 0)
4561  continue;
4562 
4563  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4564  new_coeff - old_coeff);
4565  if(score<best_score){
4566  best_score= score;
4567  best_coeff= 0;
4568  best_change= change;
4569  best_unquant_change= new_coeff - old_coeff;
4570  }
4571  }
4572  }
4573 
4574  run=0;
4575  rle_index=0;
4576  run2= run_tab[rle_index++];
4577  prev_level=0;
4578  prev_run=0;
4579 
4580  for(i=start_i; i<64; i++){
4581  int j= perm_scantable[i];
4582  const int level= block[j];
4583  int change, old_coeff;
4584 
4585  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4586  break;
4587 
4588  if(level){
4589  if(level<0) old_coeff= qmul*level - qadd;
4590  else old_coeff= qmul*level + qadd;
4591  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4592  }else{
4593  old_coeff=0;
4594  run2--;
4595  av_assert2(run2>=0 || i >= last_non_zero );
4596  }
4597 
4598  for(change=-1; change<=1; change+=2){
4599  int new_level= level + change;
4600  int score, new_coeff, unquant_change;
4601 
4602  score=0;
4603  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4604  continue;
4605 
4606  if(new_level){
4607  if(new_level<0) new_coeff= qmul*new_level - qadd;
4608  else new_coeff= qmul*new_level + qadd;
4609  if(new_coeff >= 2048 || new_coeff <= -2048)
4610  continue;
4611  //FIXME check for overflow
4612 
4613  if(level){
4614  if(level < 63 && level > -63){
4615  if(i < last_non_zero)
4616  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4617  - length[UNI_AC_ENC_INDEX(run, level+64)];
4618  else
4619  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4620  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4621  }
4622  }else{
4623  av_assert2(FFABS(new_level)==1);
4624 
4625  if(analyze_gradient){
4626  int g= d1[ scantable[i] ];
4627  if(g && (g^new_level) >= 0)
4628  continue;
4629  }
4630 
4631  if(i < last_non_zero){
4632  int next_i= i + run2 + 1;
4633  int next_level= block[ perm_scantable[next_i] ] + 64;
4634 
4635  if(next_level&(~127))
4636  next_level= 0;
4637 
4638  if(next_i < last_non_zero)
4639  score += length[UNI_AC_ENC_INDEX(run, 65)]
4640  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4641  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4642  else
4643  score += length[UNI_AC_ENC_INDEX(run, 65)]
4644  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4645  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4646  }else{
4647  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4648  if(prev_level){
4649  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4650  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4651  }
4652  }
4653  }
4654  }else{
4655  new_coeff=0;
4656  av_assert2(FFABS(level)==1);
4657 
4658  if(i < last_non_zero){
4659  int next_i= i + run2 + 1;
4660  int next_level= block[ perm_scantable[next_i] ] + 64;
4661 
4662  if(next_level&(~127))
4663  next_level= 0;
4664 
4665  if(next_i < last_non_zero)
4666  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4667  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4668  - length[UNI_AC_ENC_INDEX(run, 65)];
4669  else
4670  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4671  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4672  - length[UNI_AC_ENC_INDEX(run, 65)];
4673  }else{
4674  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4675  if(prev_level){
4676  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4677  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4678  }
4679  }
4680  }
4681 
4682  score *= lambda;
4683 
4684  unquant_change= new_coeff - old_coeff;
4685  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4686 
4687  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4688  unquant_change);
4689  if(score<best_score){
4690  best_score= score;
4691  best_coeff= i;
4692  best_change= change;
4693  best_unquant_change= unquant_change;
4694  }
4695  }
4696  if(level){
4697  prev_level= level + 64;
4698  if(prev_level&(~127))
4699  prev_level= 0;
4700  prev_run= run;
4701  run=0;
4702  }else{
4703  run++;
4704  }
4705  }
4706 #ifdef REFINE_STATS
4707 STOP_TIMER("iterative step")}
4708 #endif
4709 
4710  if(best_change){
4711  int j= perm_scantable[ best_coeff ];
4712 
4713  block[j] += best_change;
4714 
4715  if(best_coeff > last_non_zero){
4716  last_non_zero= best_coeff;
4717  av_assert2(block[j]);
4718 #ifdef REFINE_STATS
4719 after_last++;
4720 #endif
4721  }else{
4722 #ifdef REFINE_STATS
4723 if(block[j]){
4724  if(block[j] - best_change){
4725  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4726  raise++;
4727  }else{
4728  lower++;
4729  }
4730  }else{
4731  from_zero++;
4732  }
4733 }else{
4734  to_zero++;
4735 }
4736 #endif
4737  for(; last_non_zero>=start_i; last_non_zero--){
4738  if(block[perm_scantable[last_non_zero]])
4739  break;
4740  }
4741  }
4742 #ifdef REFINE_STATS
4743 count++;
4744 if(256*256*256*64 % count == 0){
4745  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4746 }
4747 #endif
4748  run=0;
4749  rle_index=0;
4750  for(i=start_i; i<=last_non_zero; i++){
4751  int j= perm_scantable[i];
4752  const int level= block[j];
4753 
4754  if(level){
4755  run_tab[rle_index++]=run;
4756  run=0;
4757  }else{
4758  run++;
4759  }
4760  }
4761 
4762  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4763  }else{
4764  break;
4765  }
4766  }
4767 #ifdef REFINE_STATS
4768 if(last_non_zero>0){
4769 STOP_TIMER("iterative search")
4770 }
4771 }
4772 #endif
4773 
4774  return last_non_zero;
4775 }
4776 
/**
 * Permute an 8x8 block of coefficients in place.
 * @param block the block to permute
 * @param permutation maps each coefficient index to its new position
 * @param scantable scan order used only to visit the non-zero region
 *                  quickly; the block itself is NOT put into scantable
 *                  order
 * @param last index (in scantable order) of the last non-zero
 *             coefficient; nothing past it is touched
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* First pass: lift every coefficient up to 'last' out of the block
     * and clear its original slot, so overlapping source/destination
     * positions cannot clobber each other. */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];

        saved[idx] = block[idx];
        block[idx] = 0;
    }

    /* Second pass: drop each saved coefficient at its permuted slot. */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];

        block[permutation[idx]] = saved[idx];
    }
}
4812 
4814  int16_t *block, int n,
4815  int qscale, int *overflow)
4816 {
4817  int i, j, level, last_non_zero, q, start_i;
4818  const int *qmat;
4819  const uint8_t *scantable;
4820  int bias;
4821  int max=0;
4822  unsigned int threshold1, threshold2;
4823 
4824  s->fdsp.fdct(block);
4825 
4826  if(s->dct_error_sum)
4827  s->denoise_dct(s, block);
4828 
4829  if (s->mb_intra) {
4830  scantable= s->intra_scantable.scantable;
4831  if (!s->h263_aic) {
4832  if (n < 4)
4833  q = s->y_dc_scale;
4834  else
4835  q = s->c_dc_scale;
4836  q = q << 3;
4837  } else
4838  /* For AIC we skip quant/dequant of INTRADC */
4839  q = 1 << 3;
4840 
4841  /* note: block[0] is assumed to be positive */
4842  block[0] = (block[0] + (q >> 1)) / q;
4843  start_i = 1;
4844  last_non_zero = 0;
4845  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4846  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4847  } else {
4848  scantable= s->inter_scantable.scantable;
4849  start_i = 0;
4850  last_non_zero = -1;
4851  qmat = s->q_inter_matrix[qscale];
4852  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4853  }
4854  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4855  threshold2= (threshold1<<1);
4856  for(i=63;i>=start_i;i--) {
4857  j = scantable[i];
4858  level = block[j] * qmat[j];
4859 
4860  if(((unsigned)(level+threshold1))>threshold2){
4861  last_non_zero = i;
4862  break;
4863  }else{
4864  block[j]=0;
4865  }
4866  }
4867  for(i=start_i; i<=last_non_zero; i++) {
4868  j = scantable[i];
4869  level = block[j] * qmat[j];
4870 
4871 // if( bias+level >= (1<<QMAT_SHIFT)
4872 // || bias-level >= (1<<QMAT_SHIFT)){
4873  if(((unsigned)(level+threshold1))>threshold2){
4874  if(level>0){
4875  level= (bias + level)>>QMAT_SHIFT;
4876  block[j]= level;
4877  }else{
4878  level= (bias - level)>>QMAT_SHIFT;
4879  block[j]= -level;
4880  }
4881  max |=level;
4882  }else{
4883  block[j]=0;
4884  }
4885  }
4886  *overflow= s->max_qcoeff < max; //overflow might have happened
4887 
4888  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4889  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4891  scantable, last_non_zero);
4892 
4893  return last_non_zero;
4894 }
4895 
4896 #define OFFSET(x) offsetof(MpegEncContext, x)
4897 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4898 static const AVOption h263_options[] = {
4899  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4900  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4902  { NULL },
4903 };
4904 
4905 static const AVClass h263_class = {
4906  .class_name = "H.263 encoder",
4907  .item_name = av_default_item_name,
4908  .option = h263_options,
4909  .version = LIBAVUTIL_VERSION_INT,
4910 };
4911 
4913  .name = "h263",
4914  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4915  .type = AVMEDIA_TYPE_VIDEO,
4916  .id = AV_CODEC_ID_H263,
4917  .priv_data_size = sizeof(MpegEncContext),
4919  .encode2 = ff_mpv_encode_picture,
4920  .close = ff_mpv_encode_end,
4922  .priv_class = &h263_class,
4923 };
4924 
4925 static const AVOption h263p_options[] = {
4926  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4927  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4928  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4929  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4931  { NULL },
4932 };
4933 static const AVClass h263p_class = {
4934  .class_name = "H.263p encoder",
4935  .item_name = av_default_item_name,
4936  .option = h263p_options,
4937  .version = LIBAVUTIL_VERSION_INT,
4938 };
4939 
4941  .name = "h263p",
4942  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4943  .type = AVMEDIA_TYPE_VIDEO,
4944  .id = AV_CODEC_ID_H263P,
4945  .priv_data_size = sizeof(MpegEncContext),
4947  .encode2 = ff_mpv_encode_picture,
4948  .close = ff_mpv_encode_end,
4949  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4951  .priv_class = &h263p_class,
4952 };
4953 
4954 static const AVClass msmpeg4v2_class = {
4955  .class_name = "msmpeg4v2 encoder",
4956  .item_name = av_default_item_name,
4957  .option = ff_mpv_generic_options,
4958  .version = LIBAVUTIL_VERSION_INT,
4959 };
4960 
4962  .name = "msmpeg4v2",
4963  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4964  .type = AVMEDIA_TYPE_VIDEO,
4965  .id = AV_CODEC_ID_MSMPEG4V2,
4966  .priv_data_size = sizeof(MpegEncContext),
4968  .encode2 = ff_mpv_encode_picture,
4969  .close = ff_mpv_encode_end,
4971  .priv_class = &msmpeg4v2_class,
4972 };
4973 
4974 static const AVClass msmpeg4v3_class = {
4975  .class_name = "msmpeg4v3 encoder",
4976  .item_name = av_default_item_name,
4977  .option = ff_mpv_generic_options,
4978  .version = LIBAVUTIL_VERSION_INT,
4979 };
4980 
4982  .name = "msmpeg4",
4983  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4984  .type = AVMEDIA_TYPE_VIDEO,
4985  .id = AV_CODEC_ID_MSMPEG4V3,
4986  .priv_data_size = sizeof(MpegEncContext),
4988  .encode2 = ff_mpv_encode_picture,
4989  .close = ff_mpv_encode_end,
4991  .priv_class = &msmpeg4v3_class,
4992 };
4993 
4994 static const AVClass wmv1_class = {
4995  .class_name = "wmv1 encoder",
4996  .item_name = av_default_item_name,
4997  .option = ff_mpv_generic_options,
4998  .version = LIBAVUTIL_VERSION_INT,
4999 };
5000 
5002  .name = "wmv1",
5003  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
5004  .type = AVMEDIA_TYPE_VIDEO,
5005  .id = AV_CODEC_ID_WMV1,
5006  .priv_data_size = sizeof(MpegEncContext),
5008  .encode2 = ff_mpv_encode_picture,
5009  .close = ff_mpv_encode_end,
5011  .priv_class = &wmv1_class,
5012 };
int last_time_base
Definition: mpegvideo.h:386
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:938
int plane
Definition: avisynth_c.h:422
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2986
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1009
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
int chroma_elim_threshold
Definition: mpegvideo.h:114
#define INPLACE_OFFSET
Definition: mpegutils.h:123
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:338
IDCTDSPContext idsp
Definition: mpegvideo.h:227
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:341
const struct AVCodec * codec
Definition: avcodec.h:1770
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:572
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2746
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1510
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:124
const char * s
Definition: avisynth_c.h:768
#define RECON_SHIFT
attribute_deprecated int intra_quant_bias
Definition: avcodec.h:2290
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:109
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
#define CONFIG_WMV2_ENCODER
Definition: config.h:1540
int size
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1002
int esc3_level_length
Definition: mpegvideo.h:438
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2419
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
< number of bits to represent the fractional part of time (encoder only)
Definition: mpegvideo.h:385
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:1303
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:245
#define FF_CMP_DCTMAX
Definition: avcodec.h:2211
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:2326
AVOption.
Definition: opt.h:246
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:697
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:279
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:150
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:905
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:185
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegpicture.h:74
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3101
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:917
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int pre_pass
= 1 for the pre pass
Definition: motion_est.h:72
#define CONFIG_RV10_ENCODER
Definition: config.h:1523
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:900
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:571
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:516
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:556
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:206
attribute_deprecated int rc_qmod_freq
Definition: avcodec.h:2731
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:116
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1826
#define LIBAVUTIL_VERSION_INT
Definition: version.h:86
else temp
Definition: vf_mcdeint.c:256
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:393
const char * g
Definition: vf_curves.c:112
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
const char * desc
Definition: nvenc.c:60
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:151
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegpicture.h:65
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:328
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
static int estimate_qp(MpegEncContext *s, int dry_run)
#define MAX_MV
Definition: motion_est.h:35
int acc
Definition: yuv2rgb.c:547
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:1359
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:2047
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:191
MJPEG encoder.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:129
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:430
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:2853
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:605
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2498
#define me
int frame_skip_cmp
Definition: mpegvideo.h:564
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:436
int b_frame_strategy
Definition: mpegvideo.h:557
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:114
int num
Numerator.
Definition: rational.h:59
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
int size
Definition: avcodec.h:1680
attribute_deprecated int lmax
Definition: avcodec.h:2835
enum AVCodecID codec_id
Definition: mpegvideo.h:109
const char * b
Definition: vf_curves.c:113
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1501
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:115
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2172
int frame_skip_exp
Definition: mpegvideo.h:563
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
#define FF_MPV_FLAG_NAQ
Definition: mpegvideo.h:575
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:251
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:308
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:121
int out_size
Definition: movenc.c:55
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:2197
int coded_score[12]
Definition: mpegvideo.h:320
mpegvideo header.
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:70
int scene_change_score
Definition: motion_est.h:87
int mpv_flags
flags set by private options
Definition: mpegvideo.h:526
uint8_t permutated[64]
Definition: idctdsp.h:33
static const AVClass h263_class
uint8_t run
Definition: svq3.c:206
static AVPacket pkt
void ff_xvid_rate_control_uninit(struct MpegEncContext *s)
Definition: libxvid_rc.c:158
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3164
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:311
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:409
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:361
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:130
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
#define src
Definition: vp8dsp.c:254
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:232
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: avcodec.h:1458
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:3739
#define MAX_FCODE
Definition: mpegutils.h:48
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:387
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:93
int qscale
QP.
Definition: mpegvideo.h:201
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:84
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:247
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:307
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:1364
int chroma_x_shift
Definition: mpegvideo.h:476
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:111
int field_select[2][2]
Definition: mpegvideo.h:277
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:2849
attribute_deprecated int me_method
This option does nothing.
Definition: avcodec.h:1996
uint32_t ff_square_tab[512]
Definition: me_cmp.c:32
#define CONFIG_RV20_ENCODER
Definition: config.h:1524
int quant_precision
Definition: mpegvideo.h:398
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2438
common functions for use with the Xvid wrappers
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1898
int modified_quant
Definition: mpegvideo.h:379
float ff_xvid_rate_estimate_qscale(struct MpegEncContext *s, int dry_run)
Definition: libxvid_rc.c:101
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:574
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:217
float rc_buffer_aggressivity
Definition: mpegvideo.h:537
int b_frame_score
Definition: mpegpicture.h:84
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:27
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:1384
static int16_t block[64]
Definition: dct.c:115
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
attribute_deprecated int mv_bits
Definition: avcodec.h:2905
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:107
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:2133
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:125
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
attribute_deprecated int rc_strategy
Definition: avcodec.h:2060
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:407
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:493
int64_t time
time of current frame
Definition: mpegvideo.h:388
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1834
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
Definition: mpegvideo.h:264
ScratchpadContext sc
Definition: mpegvideo.h:199
uint8_t bits
Definition: crc.c:296
attribute_deprecated const char * rc_eq
Definition: avcodec.h:2754
attribute_deprecated float rc_buffer_aggressivity
Definition: avcodec.h:2776
uint8_t
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:134
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:108
AVOptions.
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:407
enum OutputFormat out_format
output format
Definition: mpegvideo.h:101
attribute_deprecated int i_count
Definition: avcodec.h:2913
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:117
#define CONFIG_FAANDCT
Definition: config.h:596
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
int noise_reduction
Definition: mpegvideo.h:567
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:213
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
uint16_t * chroma_intra_matrix
custom intra quantization matrix
Definition: avcodec.h:3558
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void(* diff_pixels)(int16_t *av_restrict block, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride)
Definition: pixblockdsp.h:32
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
#define FF_RC_STRATEGY_XVID
Definition: avcodec.h:2061
Multithreading support functions.
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:2248
AVCodec ff_h263_encoder
int frame_skip_threshold
Definition: mpegvideo.h:561
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define FF_CMP_VSSE
Definition: avcodec.h:2207
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:921
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:458
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:395
#define emms_c()
Definition: internal.h:54
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:294
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1876
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
H.263 tables.
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:118
int interlaced_dct
Definition: mpegvideo.h:481
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:324
int me_cmp
motion estimation comparison function
Definition: avcodec.h:2179
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:71
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:177
#define CHROMA_420
Definition: mpegvideo.h:473
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:444
int intra_dc_precision
Definition: mpegvideo.h:461
int repeat_first_field
Definition: mpegvideo.h:470
static AVFrame * frame
quarterpel DSP functions
const char data[16]
Definition: mxf.c:90
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1502
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:248
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: avcodec.h:1679
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define ff_dlog(a,...)
#define AVERROR_EOF
End of file.
Definition: error.h:55
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:390
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
#define CODEC_FLAG_MV0
Definition: avcodec.h:1123
const uint8_t * scantable
Definition: idctdsp.h:32
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:330
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:126
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:71
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:1375
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:2112
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2931
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:309
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:904
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:573
int scenechange_threshold
Definition: mpegvideo.h:566
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:2265
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:993
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:336
#define MAX_LEVEL
Definition: rl.h:36
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:2841
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:207
int flipflop_rounding
Definition: mpegvideo.h:435
#define CHROMA_444
Definition: mpegvideo.h:475
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:449
uint8_t * mb_info_ptr
Definition: mpegvideo.h:369
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:731
#define ff_sqrt
Definition: mathops.h:206
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2809
#define ROUNDED_DIV(a, b)
Definition: common.h:56
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:325
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2985
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1711
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:745
attribute_deprecated int skip_count
Definition: avcodec.h:2917
#define EDGE_WIDTH
Definition: mpegpicture.h:33
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:323
#define FF_MPV_FLAG_MV0
Definition: mpegvideo.h:576
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:99
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:184
enum AVCodecID id
Definition: avcodec.h:3753
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:106
H263DSPContext h263dsp
Definition: mpegvideo.h:234
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:153
#define MAX_DMV
Definition: motion_est.h:37
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideo.h:212
int width
Definition: frame.h:259
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:324
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:2083
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:182
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
attribute_deprecated float rc_initial_cplx
Definition: avcodec.h:2779
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:316
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:880
#define MAX_MB_BYTES
Definition: mpegutils.h:47
int64_t total_bits
Definition: mpegvideo.h:337
#define PTRDIFF_SPECIFIER
Definition: internal.h:256
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:192
#define ARCH_X86
Definition: config.h:38
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int chroma_y_shift
Definition: mpegvideo.h:477
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
Definition: mpegvideo.h:115
av_default_item_name
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:403
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:2185
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:3418
int qmax
maximum quantizer
Definition: avcodec.h:2712
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2447
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:220
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:181
ERContext er
Definition: mpegvideo.h:551
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3211
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:216
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegpicture.h:87
const char * r
Definition: vf_curves.c:111
int ff_xvid_rate_control_init(struct MpegEncContext *s)
Definition: libxvid_rc.c:42
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
PixblockDSPContext pdsp
Definition: mpegvideo.h:231
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:313
int h263_slice_structured
Definition: mpegvideo.h:377
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1856
uint8_t * buf
Definition: put_bits.h:38
uint16_t width
Definition: gdv.c:47
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:230
attribute_deprecated int inter_quant_bias
Definition: avcodec.h:2296
const char * name
Name of the codec implementation.
Definition: avcodec.h:3746
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:399
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
int me_pre
prepass for motion estimation
Definition: mpegvideo.h:260
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:570
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:404
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:254
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1122
#define FFMAX(a, b)
Definition: common.h:94
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
#define fail()
Definition: checkasm.h:109
int64_t mb_var_sum_temp
Definition: motion_est.h:86
int(* pix_norm1)(uint8_t *pix, int line_size)
int(* pix_sum)(uint8_t *pix, int line_size)
attribute_deprecated int b_sensitivity
Definition: avcodec.h:2469
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1685
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:356
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2739
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
Definition: ituh263enc.c:266
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:123
int * lambda_table
Definition: mpegvideo.h:205
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:2372
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:2769
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:312
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:82
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
#define CHROMA_422
Definition: mpegvideo.h:474
float border_masking
Definition: mpegvideo.h:538
int progressive_frame
Definition: mpegvideo.h:479
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:284
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:876
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:319
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:329
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:451
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:110
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:934
int me_method
ME algorithm.
Definition: mpegvideo.h:256
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:171
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:506
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:306
int width
picture width / height.
Definition: avcodec.h:1948
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:181
Picture.
Definition: mpegpicture.h:45
attribute_deprecated int noise_reduction
Definition: avcodec.h:2350
int alternate_scan
Definition: mpegvideo.h:468
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:2787
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:908
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:892
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:327
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:2845
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:2325
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3204
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:520
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:324
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:441
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:518
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:83
MotionEstContext me
Definition: mpegvideo.h:282
int frame_skip_factor
Definition: mpegvideo.h:562
int n
Definition: avisynth_c.h:684
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:2324
#define CONFIG_FLV_ENCODER
Definition: config.h:1491
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
attribute_deprecated float rc_qsquish
Definition: avcodec.h:2726
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:195
#define MAX_B_FRAMES
Definition: mpegvideo.h:63
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:512
if(ret< 0)
Definition: vf_mcdeint.c:279
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:310
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:219
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:358
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3192
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:293
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
#define av_log2
Definition: intmath.h:83
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:514
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:297
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:83
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1069
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:274
AVCodec ff_h263p_encoder
attribute_deprecated int i_tex_bits
Definition: avcodec.h:2909
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
int first_slice_line
used in MPEG-4 too to handle resync markers
Definition: mpegvideo.h:434
int frame_pred_frame_dct
Definition: mpegvideo.h:462
attribute_deprecated int misc_bits
Definition: avcodec.h:2919
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:1354
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegpicture.h:68
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
int coded_picture_number
picture number in bitstream order
Definition: frame.h:315
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:389
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:74
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:152
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:237
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:204
#define CODEC_FLAG_NORMALIZE_AQP
Definition: avcodec.h:1150
void ff_faandct(int16_t *data)
Definition: faandct.c:114
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
Libavcodec external API header.
attribute_deprecated int mpeg_quant
Definition: avcodec.h:2088
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
int h263_flv
use flv H.263 header
Definition: mpegvideo.h:107
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:2346
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:131
enum AVCodecID codec_id
Definition: avcodec.h:1778
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:73
attribute_deprecated int prediction_method
Definition: avcodec.h:2152
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:90
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:2067
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:440
#define START_TIMER
Definition: timer.h:137
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:314
main external API structure.
Definition: avcodec.h:1761
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:231
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:618
ScanTable intra_scantable
Definition: mpegvideo.h:88
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int qmin
minimum quantizer
Definition: avcodec.h:2705
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:97
#define FF_CMP_NSSE
Definition: avcodec.h:2208
#define FF_DEFAULT_QUANT_BIAS
Definition: avcodec.h:2291
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:141
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:137
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:510
FDCTDSPContext fdsp
Definition: mpegvideo.h:224
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:400
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:2126
float rc_qmod_amp
Definition: mpegvideo.h:534
int luma_elim_threshold
Definition: mpegvideo.h:113
attribute_deprecated int header_bits
Definition: avcodec.h:2907
void ff_fix_long_p_mvs(MpegEncContext *s)
Definition: motion_est.c:1671
Picture * picture
main picture buffer
Definition: mpegvideo.h:133
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:402
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:315
int progressive_sequence
Definition: mpegvideo.h:454
uint16_t * intra_matrix
custom intra quantization matrix
Definition: avcodec.h:2334
H.261 codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:67
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:339
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:252
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:2984
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegpicture.h:82
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:111
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:2327
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:514
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:295
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:122
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:132
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:367
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
Definition: avcodec.h:2427
#define STRIDE_ALIGN
Definition: internal.h:99
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:125
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:653
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:119
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:522
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
int f_code
forward MV resolution
Definition: mpegvideo.h:235
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1081
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:121
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:524
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
Definition: avcodec.h:2911
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1535
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
uint16_t * inter_matrix
custom inter quantization matrix
Definition: avcodec.h:2341
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:112
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:209
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
int last_mv_dir
last mv_dir, used for B-frame encoding
Definition: mpegvideo.h:450
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:283
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:102
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:249
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:505
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:2140
static int64_t pts
Global timestamp for the audio frames.
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:2119
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
Definition: pixblockdsp.h:29
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:253
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:250
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:888
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:186
uint8_t level
Definition: svq3.c:207
me_cmp_func sad[6]
Definition: me_cmp.h:56
int me_penalty_compensation
Definition: mpegvideo.h:259
int64_t mc_mb_var_sum_temp
Definition: motion_est.h:85
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:246
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:128
me_cmp_func sse[6]
Definition: me_cmp.h:57
static int estimate_motion_thread(AVCodecContext *c, void *arg)
int vbv_ignore_qmax
Definition: mpegvideo.h:540
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:78
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:180
char * rc_eq
Definition: mpegvideo.h:542
int8_t * qscale_table
Definition: mpegpicture.h:50
#define MAX_RUN
Definition: rl.h:35
struct AVCodecContext * avctx
Definition: mpegvideo.h:95
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1974
PutBitContext pb
bit output
Definition: mpegvideo.h:148
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:294
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1504
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1503
volatile int error_count
int
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:2191
int quantizer_noise_shaping
Definition: mpegvideo.h:527
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:523
MECmpContext mecc
Definition: mpegvideo.h:228
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
float rc_initial_cplx
Definition: mpegvideo.h:536
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:127
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:112
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:2894
uint8_t * dest[3]
Definition: mpegvideo.h:295
int shared
Definition: mpegpicture.h:88
static double c[64]
int last_pict_type
Definition: mpegvideo.h:211
#define CONFIG_H261_ENCODER
Definition: config.h:1493
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:206
static int16_t basis[64][64]
attribute_deprecated float border_masking
Definition: avcodec.h:2393
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:159
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:179
Bi-dir predicted.
Definition: avutil.h:276
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
float rc_qsquish
ratecontrol qmin qmax limiting method 0-> clipping, 1-> use a nice continuous function to limit qscal...
Definition: mpegvideo.h:533
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there&#39;s a delay
Definition: mpegvideo.h:145
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:3183
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
#define H263_GOB_HEIGHT(h)
Definition: h263.h:44
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
Denominator.
Definition: rational.h:60
#define CONFIG_H263_ENCODER
Definition: config.h:1494
attribute_deprecated float rc_qmod_amp
Definition: avcodec.h:2729
#define CONFIG_H263P_ENCODER
Definition: config.h:1495
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:187
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:455
int trellis
trellis RD quantization
Definition: avcodec.h:2861
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:2279
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:777
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:896
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:421
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:106
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:499
#define STOP_TIMER(id)
Definition: timer.h:138
int slices
Number of slices.
Definition: avcodec.h:2514
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:1803
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:85
#define PICT_FRAME
Definition: mpegutils.h:39
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:877
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int picture_structure
Definition: mpegvideo.h:458
int dia_size
ME diamond size & shape.
Definition: avcodec.h:2221
#define av_free(p)
attribute_deprecated int frame_bits
Definition: avcodec.h:2923
VideoDSPContext vdsp
Definition: mpegvideo.h:233
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:2415
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2727
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:1369
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1618
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:357
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1811
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:498
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:282
int64_t bit_rate
wanted bit rate
Definition: mpegvideo.h:100
This side data corresponds to the AVCPBProperties struct.
Definition: avcodec.h:1510
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:406
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:165
attribute_deprecated int p_count
Definition: avcodec.h:2915
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:279
attribute_deprecated int error_rate
Definition: avcodec.h:3405
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:2888
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1720
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
Definition: mpegvideo.h:135
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:180
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1678
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:634
int height
Definition: frame.h:259
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:124
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void INT64 INT64 count
Definition: avisynth_c.h:690
void INT64 start
Definition: avisynth_c.h:690
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:87
#define av_always_inline
Definition: attributes.h:39
#define M_PI
Definition: mathematics.h:52
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
int rtp_payload_size
Definition: mpegvideo.h:488
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:939
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:82
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:307
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
const char int length
Definition: avisynth_c.h:768
attribute_deprecated int lmin
Definition: avcodec.h:2829
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:113
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:236
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:376
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:3232
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
int dct_count[2]
Definition: mpegvideo.h:333
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegpicture.h:81
static int encode_frame(AVCodecContext *c, AVFrame *frame)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:508
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1656
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:179
int delay
Codec delay.
Definition: avcodec.h:1931
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2981
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1672
int ff_check_alignment(void)
Definition: me_cmp.c:988
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:603
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:142
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:1974
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:275
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:203
AVCodec ff_msmpeg4v2_encoder
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:2762
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:391
enum idct_permutation_type perm_type
Definition: idctdsp.h:97
attribute_deprecated int pre_me
Definition: avcodec.h:2233
HpelDSPContext hdsp
Definition: mpegvideo.h:226
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:340