FFmpeg  2.8.17
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  *
24  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
25  */
26 
27 /**
28  * @file
29  * The simplest mpeg encoder (well, it was the simplest!).
30  */
31 
32 #include <stdint.h>
33 
34 #include "libavutil/internal.h"
35 #include "libavutil/intmath.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/opt.h"
39 #include "libavutil/timer.h"
40 #include "avcodec.h"
41 #include "dct.h"
42 #include "idctdsp.h"
43 #include "mpeg12.h"
44 #include "mpegvideo.h"
45 #include "mpegvideodata.h"
46 #include "h261.h"
47 #include "h263.h"
48 #include "h263data.h"
49 #include "mjpegenc_common.h"
50 #include "mathops.h"
51 #include "mpegutils.h"
52 #include "mjpegenc.h"
53 #include "msmpeg4.h"
54 #include "pixblockdsp.h"
55 #include "qpeldsp.h"
56 #include "faandct.h"
57 #include "thread.h"
58 #include "aandcttab.h"
59 #include "flv.h"
60 #include "mpeg4video.h"
61 #include "internal.h"
62 #include "bytestream.h"
63 #include "wmv2.h"
64 #include "rv10.h"
65 #include <limits.h>
66 #include "sp5x.h"
67 
68 #define QUANT_BIAS_SHIFT 8
69 
70 #define QMAT_SHIFT_MMX 16
71 #define QMAT_SHIFT 21
72 
74 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
75 static int sse_mb(MpegEncContext *s);
76 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
77 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
78 
81 
84  { NULL },
85 };
86 
/*
 * Build the per-qscale quantizer multiplier tables used by the fast
 * quantizers: qmat[qscale][i] approximates (1 << QMAT_SHIFT) / (qscale *
 * quant_matrix[i]), with an extra AAN-scale factor for the ifast DCT, and
 * qmat16 holds the 16-bit multiplier/bias pair for the SIMD path.
 * NOTE(review): this listing is a degraded Doxygen export; doc lines 99
 * (presumably "#if CONFIG_FAANDCT") and 159-160 (the av_log() call that
 * emits the warning below) are missing — confirm against upstream before use.
 */
87 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
88  uint16_t (*qmat16)[2][64],
89  const uint16_t *quant_matrix,
90  int bias, int qmin, int qmax, int intra)
91 {
92  FDCTDSPContext *fdsp = &s->fdsp;
93  int qscale;
94  int shift = 0;
95 
96  for (qscale = qmin; qscale <= qmax; qscale++) {
97  int i;
/* Branch 1: the accurate integer DCTs — plain reciprocal table. */
98  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
100  fdsp->fdct == ff_faandct ||
101 #endif /* CONFIG_FAANDCT */
102  fdsp->fdct == ff_jpeg_fdct_islow_10) {
103  for (i = 0; i < 64; i++) {
104  const int j = s->idsp.idct_permutation[i];
105  int64_t den = (int64_t) qscale * quant_matrix[j];
106  /* 16 <= qscale * quant_matrix[i] <= 7905
107  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
108  * 19952 <= x <= 249205026
109  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
110  * 3444240 >= (1 << 36) / (x) >= 275 */
111 
112  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
113  }
/* Branch 2: ifast DCT leaves the AAN scale factors in its output, so the
 * reciprocal folds ff_aanscales[] into the denominator. */
114  } else if (fdsp->fdct == ff_fdct_ifast) {
115  for (i = 0; i < 64; i++) {
116  const int j = s->idsp.idct_permutation[i];
117  int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
118  /* 16 <= qscale * quant_matrix[i] <= 7905
119  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
120  * 19952 <= x <= 249205026
121  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
122  * 3444240 >= (1 << 36) / (x) >= 275 */
123 
124  qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
125  }
/* Branch 3: default path additionally fills the 16-bit multiplier/bias
 * tables for the MMX/SIMD quantizer, clamping to avoid a 0 or 0x8000
 * multiplier. */
126  } else {
127  for (i = 0; i < 64; i++) {
128  const int j = s->idsp.idct_permutation[i];
129  int64_t den = (int64_t) qscale * quant_matrix[j];
130  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
131  * Assume x = qscale * quant_matrix[i]
132  * So 16 <= x <= 7905
133  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
134  * so 32768 >= (1 << 19) / (x) >= 67 */
135  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
136  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
137  // (qscale * quant_matrix[i]);
138  qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;
139 
140  if (qmat16[qscale][0][i] == 0 ||
141  qmat16[qscale][0][i] == 128 * 256)
142  qmat16[qscale][0][i] = 128 * 256 - 1;
143  qmat16[qscale][1][i] =
144  ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
145  qmat16[qscale][0][i]);
146  }
147  }
148 
/* Work out how much the multipliers could overflow a 32-bit product. */
149  for (i = intra; i < 64; i++) {
150  int64_t max = 8191;
151  if (fdsp->fdct == ff_fdct_ifast) {
152  max = (8191LL * ff_aanscales[i]) >> 14;
153  }
154  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
155  shift++;
156  }
157  }
158  }
/* NOTE(review): the av_log(s->avctx, AV_LOG_INFO, ...) opening of this
 * warning was dropped by the extraction (doc lines 159-160). */
159  if (shift) {
161  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
162  QMAT_SHIFT - shift);
163  }
164 }
165 
166 static inline void update_qscale(MpegEncContext *s)
167 {
168  if (s->q_scale_type == 1) {
169  int i;
170  int bestdiff=INT_MAX;
171  int best = 1;
172  static const uint8_t non_linear_qscale[] = {
173  1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,24,26,28
174  };
175 
176  for (i = 0 ; i<FF_ARRAY_ELEMS(non_linear_qscale); i++) {
177  int diff = FFABS((non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 7)) - (int)s->lambda * 139);
178  if (non_linear_qscale[i] < s->avctx->qmin ||
179  (non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
180  continue;
181  if (diff < bestdiff) {
182  bestdiff = diff;
183  best = non_linear_qscale[i];
184  }
185  }
186  s->qscale = best;
187  } else {
188  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
189  (FF_LAMBDA_SHIFT + 7);
190  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
191  }
192 
193  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
195 }
196 
197 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
198 {
199  int i;
200 
201  if (matrix) {
202  put_bits(pb, 1, 1);
203  for (i = 0; i < 64; i++) {
204  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
205  }
206  } else
207  put_bits(pb, 1, 0);
208 }
209 
210 /**
211  * init s->current_picture.qscale_table from s->lambda_table
212  */
214 {
215  int8_t * const qscale_table = s->current_picture.qscale_table;
216  int i;
217 
218  for (i = 0; i < s->mb_num; i++) {
219  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
220  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
221  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
222  s->avctx->qmax);
223  }
224 }
225 
/*
 * Copy the per-frame coding decisions from one context to another after
 * motion estimation, so that duplicated (slice-thread) contexts agree with
 * the main one.
 * NOTE(review): the signature was dropped from this listing (doc lines
 * 226-227; upstream names this update_duplicate_context_after_me(dst, src)),
 * and doc lines 231 and 237-238 (additional COPY() entries) are also
 * missing — confirm the full field list against upstream before relying on
 * this copy being complete.
 */
228 {
229 #define COPY(a) dst->a= src->a
230  COPY(pict_type);
232  COPY(f_code);
233  COPY(b_code);
234  COPY(qscale);
235  COPY(lambda);
236  COPY(lambda2);
239  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
240  COPY(progressive_frame); // FIXME don't set in encode_header
241  COPY(partitioned_frame); // FIXME don't set in encode_header
242 #undef COPY
243 }
244 
245 /**
246  * Set the given MpegEncContext to defaults for encoding.
247  * the changed fields will not depend upon the prior state of the MpegEncContext.
248  */
/*
 * NOTE(review): the signature (doc line 249, static void
 * mpv_encode_defaults(MpegEncContext *s) in upstream) and doc lines 252 and
 * 257-258 (the common-defaults call and the mv_penalty/fcode_tab pointer
 * setup, per upstream) are missing from this listing.
 */
250 {
251  int i;
253 
/* Every fcode gets a default motion-vector length of 1. */
254  for (i = -16; i < 16; i++) {
255  default_fcode_tab[i + MAX_MV] = 1;
256  }
259 
/* Restart picture numbering for a fresh encode. */
260  s->input_picture_number = 0;
261  s->picture_in_gop_number = 0;
262 }
263 
/*
 * NOTE(review): heavily truncated fragment. Judging by the surviving
 * conditionals this is the encoder DCT/quantizer function-pointer setup
 * (dct_init() in upstream FFmpeg 2.8): its signature, the x86 init call,
 * and the right-hand sides assigning dct_quantize / denoise_dct and the
 * trellis variant were all dropped by the extraction — restore from
 * upstream before compiling.
 */
265  if (ARCH_X86)
267 
270  if (!s->dct_quantize)
272  if (!s->denoise_dct)
275  if (s->avctx->trellis)
277 
278  return 0;
279 }
280 
281 /* init video encoder */
/*
 * Encoder initialization (ff_mpv_encode_init() in upstream; the signature at
 * doc line 282 was dropped by the extraction). Validates the user's codec
 * parameters, selects the output format per codec id, allocates the
 * quantizer/statistics tables and initializes rate control.
 * NOTE(review): this listing is a degraded Doxygen export with many interior
 * lines missing (every skip in the embedded doc numbering, e.g. 287, 290,
 * 332, 336, 341, 350, 365, 369, ...). Do not compile from this copy;
 * diff against upstream FFmpeg 2.8 mpegvideo_enc.c.
 */
283 {
284  MpegEncContext *s = avctx->priv_data;
285  int i, ret, format_supported;
286 
288 
/* Pixel-format validation, per target codec. */
289  switch (avctx->codec_id) {
291  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
292  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
293  av_log(avctx, AV_LOG_ERROR,
294  "only YUV420 and YUV422 are supported\n");
295  return -1;
296  }
297  break;
298  case AV_CODEC_ID_MJPEG:
299  case AV_CODEC_ID_AMV:
300  format_supported = 0;
301  /* JPEG color space */
302  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
303  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
304  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
305  (avctx->color_range == AVCOL_RANGE_JPEG &&
306  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
307  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
308  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
309  format_supported = 1;
310  /* MPEG color space */
311  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
312  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
313  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
314  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
315  format_supported = 1;
316 
317  if (!format_supported) {
318  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
319  return -1;
320  }
321  break;
322  default:
323  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
324  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
325  return -1;
326  }
327  }
328 
/* Chroma subsampling selection — the chroma_format assignments (doc lines
 * 332, 336, 341) are missing from this listing. */
329  switch (avctx->pix_fmt) {
330  case AV_PIX_FMT_YUVJ444P:
331  case AV_PIX_FMT_YUV444P:
333  break;
334  case AV_PIX_FMT_YUVJ422P:
335  case AV_PIX_FMT_YUV422P:
337  break;
338  case AV_PIX_FMT_YUVJ420P:
339  case AV_PIX_FMT_YUV420P:
340  default:
342  break;
343  }
344 
/* Copy the basic user parameters into the context. */
345  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
346  s->bit_rate = avctx->bit_rate;
347  s->width = avctx->width;
348  s->height = avctx->height;
349  if (avctx->gop_size > 600 &&
351  av_log(avctx, AV_LOG_WARNING,
352  "keyframe interval too large!, reducing it from %d to %d\n",
353  avctx->gop_size, 600);
354  avctx->gop_size = 600;
355  }
356  s->gop_size = avctx->gop_size;
357  s->avctx = avctx;
358  if (avctx->max_b_frames > MAX_B_FRAMES) {
359  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
360  "is %d.\n", MAX_B_FRAMES);
361  avctx->max_b_frames = MAX_B_FRAMES;
362  }
363  s->max_b_frames = avctx->max_b_frames;
364  s->codec_id = avctx->codec->id;
366  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
367  s->mpeg_quant = avctx->mpeg_quant;
368  s->rtp_mode = !!avctx->rtp_payload_size;
370 
371  // workaround some differences between how applications specify dc precision
372  if (s->intra_dc_precision < 0) {
373  s->intra_dc_precision += 8;
374  } else if (s->intra_dc_precision >= 8)
375  s->intra_dc_precision -= 8;
376 
377  if (s->intra_dc_precision < 0) {
378  av_log(avctx, AV_LOG_ERROR,
379  "intra dc precision must be positive, note some applications use"
380  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
381  return AVERROR(EINVAL);
382  }
383 
384  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
385  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
386  return AVERROR(EINVAL);
387  }
389 
/* GOP size 0/1 means intra-only; 12 is a harmless internal placeholder. */
390  if (s->gop_size <= 1) {
391  s->intra_only = 1;
392  s->gop_size = 12;
393  } else {
394  s->intra_only = 0;
395  }
396 
397 #if FF_API_MOTION_EST
399  s->me_method = avctx->me_method;
401 #endif
402 
403  /* Fixed QSCALE */
404  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
405 
406 #if FF_API_MPV_OPT
408  if (avctx->border_masking != 0.0)
409  s->border_masking = avctx->border_masking;
411 #endif
412 
/* Adaptive quantization is on whenever any masking option is active and
 * qscale is not fixed (some masking terms at doc lines 415-416 missing). */
413  s->adaptive_quant = (s->avctx->lumi_masking ||
414  s->avctx->dark_masking ||
417  s->avctx->p_masking ||
418  s->border_masking ||
419  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
420  !s->fixed_qscale;
421 
423 
/* Rate-control sanity checks: auto-pick a VBV buffer size when only a
 * max rate was given (case labels at doc lines 426-427 missing). */
424  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
425  switch(avctx->codec_id) {
428  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
429  break;
430  case AV_CODEC_ID_MPEG4:
434  if (avctx->rc_max_rate >= 15000000) {
435  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
436  } else if(avctx->rc_max_rate >= 2000000) {
437  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
438  } else if(avctx->rc_max_rate >= 384000) {
439  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
440  } else
441  avctx->rc_buffer_size = 40;
442  avctx->rc_buffer_size *= 16384;
443  break;
444  }
445  if (avctx->rc_buffer_size) {
446  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
447  }
448  }
449 
450  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
451  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
452  return -1;
453  }
454 
455  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
456  av_log(avctx, AV_LOG_INFO,
457  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
458  }
459 
460  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
461  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
462  return -1;
463  }
464 
465  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
466  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
467  return -1;
468  }
469 
470  if (avctx->rc_max_rate &&
471  avctx->rc_max_rate == avctx->bit_rate &&
472  avctx->rc_max_rate != avctx->rc_min_rate) {
473  av_log(avctx, AV_LOG_INFO,
474  "impossible bitrate constraints, this will fail\n");
475  }
476 
477  if (avctx->rc_buffer_size &&
478  avctx->bit_rate * (int64_t)avctx->time_base.num >
479  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
480  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
481  return -1;
482  }
483 
484  if (!s->fixed_qscale &&
485  avctx->bit_rate * av_q2d(avctx->time_base) >
486  avctx->bit_rate_tolerance) {
487  av_log(avctx, AV_LOG_WARNING,
488  "bitrate tolerance %d too small for bitrate %d, overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
489  avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
490  }
491 
/* vbv_delay overflow warning (conditions at doc lines 494-495 missing). */
492  if (s->avctx->rc_max_rate &&
493  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
496  90000LL * (avctx->rc_buffer_size - 1) >
497  s->avctx->rc_max_rate * 0xFFFFLL) {
498  av_log(avctx, AV_LOG_INFO,
499  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
500  "specified vbv buffer is too large for the given bitrate!\n");
501  }
502 
/* Per-codec feature support checks (some codec-id terms missing, e.g. doc
 * line 504). */
503  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
505  s->codec_id != AV_CODEC_ID_FLV1) {
506  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
507  return -1;
508  }
509 
510  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
511  av_log(avctx, AV_LOG_ERROR,
512  "OBMC is only supported with simple mb decision\n");
513  return -1;
514  }
515 
516  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
517  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
518  return -1;
519  }
520 
521  if (s->max_b_frames &&
522  s->codec_id != AV_CODEC_ID_MPEG4 &&
525  av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
526  return -1;
527  }
528  if (s->max_b_frames < 0) {
529  av_log(avctx, AV_LOG_ERROR,
530  "max b frames must be 0 or positive for mpegvideo based encoders\n");
531  return -1;
532  }
533 
534  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
535  s->codec_id == AV_CODEC_ID_H263 ||
536  s->codec_id == AV_CODEC_ID_H263P) &&
537  (avctx->sample_aspect_ratio.num > 255 ||
538  avctx->sample_aspect_ratio.den > 255)) {
539  av_log(avctx, AV_LOG_WARNING,
540  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
543  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
544  }
545 
/* Resolution / alignment constraints per codec. */
546  if ((s->codec_id == AV_CODEC_ID_H263 ||
547  s->codec_id == AV_CODEC_ID_H263P) &&
548  (avctx->width > 2048 ||
549  avctx->height > 1152 )) {
550  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
551  return -1;
552  }
553  if ((s->codec_id == AV_CODEC_ID_H263 ||
554  s->codec_id == AV_CODEC_ID_H263P) &&
555  ((avctx->width &3) ||
556  (avctx->height&3) )) {
557  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
558  return -1;
559  }
560 
561  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
562  (avctx->width > 4095 ||
563  avctx->height > 4095 )) {
564  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
565  return -1;
566  }
567 
568  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
569  (avctx->width > 16383 ||
570  avctx->height > 16383 )) {
571  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
572  return -1;
573  }
574 
575  if (s->codec_id == AV_CODEC_ID_RV10 &&
576  (avctx->width &15 ||
577  avctx->height&15 )) {
578  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
579  return AVERROR(EINVAL);
580  }
581 
582  if (s->codec_id == AV_CODEC_ID_RV20 &&
583  (avctx->width &3 ||
584  avctx->height&3 )) {
585  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
586  return AVERROR(EINVAL);
587  }
588 
589  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
590  s->codec_id == AV_CODEC_ID_WMV2) &&
591  avctx->width & 1) {
592  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
593  return -1;
594  }
595 
/* NOTE(review): the condition guarding this interlacing error (doc lines
 * 596-597) is missing from the listing. */
598  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
599  return -1;
600  }
601 
602  // FIXME mpeg2 uses that too
603  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
604  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
605  av_log(avctx, AV_LOG_ERROR,
606  "mpeg2 style quantization not supported by codec\n");
607  return -1;
608  }
609 
610  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
611  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
612  return -1;
613  }
614 
615  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
617  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
618  return -1;
619  }
620 
621  if (s->avctx->scenechange_threshold < 1000000000 &&
623  av_log(avctx, AV_LOG_ERROR,
624  "closed gop with scene change detection are not supported yet, "
625  "set threshold to 1000000000\n");
626  return -1;
627  }
628 
629  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
630  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
631  av_log(avctx, AV_LOG_ERROR,
632  "low delay forcing is only available for mpeg2\n");
633  return -1;
634  }
635  if (s->max_b_frames != 0) {
636  av_log(avctx, AV_LOG_ERROR,
637  "b frames cannot be used with low delay\n");
638  return -1;
639  }
640  }
641 
642  if (s->q_scale_type == 1) {
643  if (avctx->qmax > 28) {
644  av_log(avctx, AV_LOG_ERROR,
645  "non linear quant only supports qmax <= 28 currently\n");
646  return -1;
647  }
648  }
649 
/* Threading support checks (some codec-id terms at 652-653 missing). */
650  if (s->avctx->thread_count > 1 &&
651  s->codec_id != AV_CODEC_ID_MPEG4 &&
654  s->codec_id != AV_CODEC_ID_MJPEG &&
655  (s->codec_id != AV_CODEC_ID_H263P)) {
656  av_log(avctx, AV_LOG_ERROR,
657  "multi threaded encoding not supported by codec\n");
658  return -1;
659  }
660 
661  if (s->avctx->thread_count < 1) {
662  av_log(avctx, AV_LOG_ERROR,
663  "automatic thread number detection not supported by codec, "
664  "patch welcome\n");
665  return -1;
666  }
667 
668  if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
669  s->rtp_mode = 1;
670 
671  if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
672  s->h263_slice_structured = 1;
673 
674  if (!avctx->time_base.den || !avctx->time_base.num) {
675  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
676  return -1;
677  }
678 
679  if (avctx->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
680  av_log(avctx, AV_LOG_INFO,
681  "notice: b_frame_strategy only affects the first pass\n");
682  avctx->b_frame_strategy = 0;
683  }
684 
685  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
686  if (i > 1) {
687  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
688  avctx->time_base.den /= i;
689  avctx->time_base.num /= i;
690  //return -1;
691  }
692 
/* Quantizer rounding biases (the if() opening this branch, doc line 693,
 * is missing — presumably it keys on out_format per upstream). */
694  // (a + x * 3 / 8) / x
695  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
696  s->inter_quant_bias = 0;
697  } else {
698  s->intra_quant_bias = 0;
699  // (a - x / 4) / x
700  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
701  }
702 
703  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
704  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
705  return AVERROR(EINVAL);
706  }
707 
708 #if FF_API_QUANT_BIAS
717 #endif
718 
719  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
720 
721  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
722  s->avctx->time_base.den > (1 << 16) - 1) {
723  av_log(avctx, AV_LOG_ERROR,
724  "timebase %d/%d not supported by MPEG 4 standard, "
725  "the maximum admitted value for the timebase denominator "
726  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
727  (1 << 16) - 1);
728  return -1;
729  }
730  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
731 
/* Per-codec output-format selection (several case labels, e.g. the MPEG-1/2
 * and MSMPEG4 ones at doc lines 733/738/829/837, are missing). */
732  switch (avctx->codec->id) {
734  s->out_format = FMT_MPEG1;
736  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
737  break;
739  s->out_format = FMT_MPEG1;
741  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
742  s->rtp_mode = 1;
743  break;
744  case AV_CODEC_ID_MJPEG:
745  case AV_CODEC_ID_AMV:
746  s->out_format = FMT_MJPEG;
747  s->intra_only = 1; /* force intra only for jpeg */
748  if (!CONFIG_MJPEG_ENCODER ||
749  ff_mjpeg_encode_init(s) < 0)
750  return -1;
751  avctx->delay = 0;
752  s->low_delay = 1;
753  break;
754  case AV_CODEC_ID_H261:
755  if (!CONFIG_H261_ENCODER)
756  return -1;
757  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
758  av_log(avctx, AV_LOG_ERROR,
759  "The specified picture size of %dx%d is not valid for the "
760  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
761  s->width, s->height);
762  return -1;
763  }
764  s->out_format = FMT_H261;
765  avctx->delay = 0;
766  s->low_delay = 1;
767  s->rtp_mode = 0; /* Sliced encoding not supported */
768  break;
769  case AV_CODEC_ID_H263:
770  if (!CONFIG_H263_ENCODER)
771  return -1;
773  s->width, s->height) == 8) {
774  av_log(avctx, AV_LOG_ERROR,
775  "The specified picture size of %dx%d is not valid for "
776  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
777  "352x288, 704x576, and 1408x1152. "
778  "Try H.263+.\n", s->width, s->height);
779  return -1;
780  }
781  s->out_format = FMT_H263;
782  avctx->delay = 0;
783  s->low_delay = 1;
784  break;
785  case AV_CODEC_ID_H263P:
786  s->out_format = FMT_H263;
787  s->h263_plus = 1;
788  /* Fx */
789  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
790  s->modified_quant = s->h263_aic;
791  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
792  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
793 
794  /* /Fx */
795  /* These are just to be sure */
796  avctx->delay = 0;
797  s->low_delay = 1;
798  break;
799  case AV_CODEC_ID_FLV1:
800  s->out_format = FMT_H263;
801  s->h263_flv = 2; /* format = 1; 11-bit codes */
802  s->unrestricted_mv = 1;
803  s->rtp_mode = 0; /* don't allow GOB */
804  avctx->delay = 0;
805  s->low_delay = 1;
806  break;
807  case AV_CODEC_ID_RV10:
808  s->out_format = FMT_H263;
809  avctx->delay = 0;
810  s->low_delay = 1;
811  break;
812  case AV_CODEC_ID_RV20:
813  s->out_format = FMT_H263;
814  avctx->delay = 0;
815  s->low_delay = 1;
816  s->modified_quant = 1;
817  s->h263_aic = 1;
818  s->h263_plus = 1;
819  s->loop_filter = 1;
820  s->unrestricted_mv = 0;
821  break;
822  case AV_CODEC_ID_MPEG4:
823  s->out_format = FMT_H263;
824  s->h263_pred = 1;
825  s->unrestricted_mv = 1;
826  s->low_delay = s->max_b_frames ? 0 : 1;
827  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
828  break;
830  s->out_format = FMT_H263;
831  s->h263_pred = 1;
832  s->unrestricted_mv = 1;
833  s->msmpeg4_version = 2;
834  avctx->delay = 0;
835  s->low_delay = 1;
836  break;
838  s->out_format = FMT_H263;
839  s->h263_pred = 1;
840  s->unrestricted_mv = 1;
841  s->msmpeg4_version = 3;
842  s->flipflop_rounding = 1;
843  avctx->delay = 0;
844  s->low_delay = 1;
845  break;
846  case AV_CODEC_ID_WMV1:
847  s->out_format = FMT_H263;
848  s->h263_pred = 1;
849  s->unrestricted_mv = 1;
850  s->msmpeg4_version = 4;
851  s->flipflop_rounding = 1;
852  avctx->delay = 0;
853  s->low_delay = 1;
854  break;
855  case AV_CODEC_ID_WMV2:
856  s->out_format = FMT_H263;
857  s->h263_pred = 1;
858  s->unrestricted_mv = 1;
859  s->msmpeg4_version = 5;
860  s->flipflop_rounding = 1;
861  avctx->delay = 0;
862  s->low_delay = 1;
863  break;
864  default:
865  return -1;
866  }
867 
868  avctx->has_b_frames = !s->low_delay;
869 
870  s->encoding = 1;
871 
/* Interlacing-related flags (doc lines 873-874 missing). */
872  s->progressive_frame =
875  s->alternate_scan);
876 
877  /* init */
878  ff_mpv_idct_init(s);
879  if (ff_mpv_common_init(s) < 0)
880  return -1;
881 
/* DSP sub-contexts used by the encoder. */
882  ff_fdctdsp_init(&s->fdsp, avctx);
883  ff_me_cmp_init(&s->mecc, avctx);
885  ff_pixblockdsp_init(&s->pdsp, avctx);
886  ff_qpeldsp_init(&s->qdsp);
887 
/* Quantizer/statistics table allocations; several FF_ALLOCZ_OR_GOTO opening
 * lines (doc 889, 901, 903, 907) are missing from this listing. */
888  if (s->msmpeg4_version) {
890  2 * 2 * (MAX_LEVEL + 1) *
891  (MAX_RUN + 1) * 2 * sizeof(int), fail);
892  }
893  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
894 
895  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
896  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
897  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
898  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
899  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
900  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
902  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
904  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
905 
906  if (s->avctx->noise_reduction) {
908  2 * 64 * sizeof(uint16_t), fail);
909  }
910 
912 
915 
916  s->quant_precision = 5;
917 
920 
/* Codec-specific encoder init calls (guards at doc lines 921-925/928/930
 * missing). */
926  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
927  return ret;
929  && s->out_format == FMT_MPEG1)
931 
932  /* init q matrix */
933  for (i = 0; i < 64; i++) {
934  int j = s->idsp.idct_permutation[i];
936  s->mpeg_quant) {
939  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
940  s->intra_matrix[j] =
942  } else {
943  /* mpeg1/2 */
944  s->chroma_intra_matrix[j] =
947  }
948  if (s->avctx->intra_matrix)
949  s->intra_matrix[j] = s->avctx->intra_matrix[i];
950  if (s->avctx->inter_matrix)
951  s->inter_matrix[j] = s->avctx->inter_matrix[i];
952  }
953 
954  /* precompute matrix */
955  /* for mjpeg, we do include qscale in the matrix */
956  if (s->out_format != FMT_MJPEG) {
958  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
959  31, 1);
961  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
962  31, 0);
963  }
964 
965  if (ff_rate_control_init(s) < 0)
966  return -1;
967 
968 #if FF_API_ERROR_RATE
970  if (avctx->error_rate)
971  s->error_rate = avctx->error_rate;
973 #endif
974 
975 #if FF_API_NORMALIZE_AQP
977  if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
980 #endif
981 
982 #if FF_API_MV0
984  if (avctx->flags & CODEC_FLAG_MV0)
987 #endif
988 
989 #if FF_API_MPV_OPT
/* Migrate deprecated AVCodecContext rate-control options into the private
 * context (only when the user changed them from their defaults). */
991  if (avctx->rc_qsquish != 0.0)
992  s->rc_qsquish = avctx->rc_qsquish;
993  if (avctx->rc_qmod_amp != 0.0)
994  s->rc_qmod_amp = avctx->rc_qmod_amp;
995  if (avctx->rc_qmod_freq)
996  s->rc_qmod_freq = avctx->rc_qmod_freq;
997  if (avctx->rc_buffer_aggressivity != 1.0)
999  if (avctx->rc_initial_cplx != 0.0)
1000  s->rc_initial_cplx = avctx->rc_initial_cplx;
1001  if (avctx->lmin)
1002  s->lmin = avctx->lmin;
1003  if (avctx->lmax)
1004  s->lmax = avctx->lmax;
1005 
1006  if (avctx->rc_eq) {
1007  av_freep(&s->rc_eq);
1008  s->rc_eq = av_strdup(avctx->rc_eq);
1009  if (!s->rc_eq)
1010  return AVERROR(ENOMEM);
1011  }
1013 #endif
1014 
/* b_frame_strategy 2 needs downscaled temporary frames for lookahead. */
1015  if (avctx->b_frame_strategy == 2) {
1016  for (i = 0; i < s->max_b_frames + 2; i++) {
1017  s->tmp_frames[i] = av_frame_alloc();
1018  if (!s->tmp_frames[i])
1019  return AVERROR(ENOMEM);
1020 
1022  s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
1023  s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
1024 
1025  ret = av_frame_get_buffer(s->tmp_frames[i], 32);
1026  if (ret < 0)
1027  return ret;
1028  }
1029  }
1030 
1031  return 0;
1032 fail:
1033  ff_mpv_encode_end(avctx);
1034  return AVERROR_UNKNOWN;
1035 }
1036 
/*
 * Encoder teardown (ff_mpv_encode_end() in upstream; the signature at doc
 * line 1037 was dropped). Releases rate control, codec-specific state and
 * all tables allocated by the init function.
 * NOTE(review): several lines are missing from this listing (doc 1042,
 * 1047, 1054-1055, 1060-1063, 1066-1067, 1069) — among them additional
 * av_freep() calls per upstream; confirm before use.
 */
1038 {
1039  MpegEncContext *s = avctx->priv_data;
1040  int i;
1041 
1043 
1044  ff_mpv_common_end(s);
1045  if (CONFIG_MJPEG_ENCODER &&
1046  s->out_format == FMT_MJPEG)
1048 
1049  av_freep(&avctx->extradata);
1050 
1051  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1052  av_frame_free(&s->tmp_frames[i]);
1053 
1056 
1057  av_freep(&s->avctx->stats_out);
1058  av_freep(&s->ac_stats);
1059 
1064  av_freep(&s->q_intra_matrix);
1065  av_freep(&s->q_inter_matrix);
1068  av_freep(&s->input_picture);
1070  av_freep(&s->dct_offset);
1071 
1072  return 0;
1073 }
1074 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference.
 *
 * @param src    pointer to the top-left pixel of the 16x16 block
 * @param ref    constant value every pixel is compared against
 * @param stride distance in bytes between the starts of consecutive rows
 * @return accumulated |src[x,y] - ref| over all 256 pixels
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int total = 0;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            total += diff < 0 ? -diff : diff;
        }
    }

    return total;
}
1088 
/*
 * Count the 16x16 blocks for which intra coding looks cheaper than inter:
 * a block is counted when its flatness (SAE against its own mean, + 500)
 * is below the SAD against the reference frame.
 * NOTE(review): the first line of the signature (doc line 1089; static int
 * get_intra_count(MpegEncContext *s, uint8_t *src, ... per upstream) is
 * missing from this listing.
 */
1090  uint8_t *ref, int stride)
1091 {
1092  int x, y, w, h;
1093  int acc = 0;
1094 
/* Only full 16x16 macroblocks are considered. */
1095  w = s->width & ~15;
1096  h = s->height & ~15;
1097 
1098  for (y = 0; y < h; y += 16) {
1099  for (x = 0; x < w; x += 16) {
1100  int offset = x + y * stride;
1101  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1102  stride, 16);
/* Block mean = pixel sum / 256, rounded. */
1103  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1104  int sae = get_sae(src + offset, mean, stride);
1105 
1106  acc += sae + 500 < sad;
1107  }
1108  }
1109  return acc;
1110 }
1111 
/*
 * Thin wrapper around ff_alloc_picture() with the encoder's geometry.
 * NOTE(review): one argument line is missing from this listing (doc line
 * 1115; the chroma shift parameters per upstream) — confirm before use.
 */
1112 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1113 {
1114  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1116  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1117  &s->linesize, &s->uvlinesize);
1118 }
1119 
1120 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1121 {
// Take ownership of the user-supplied frame: either reference it directly
// ("direct" mode, when layout/alignment already match the encoder) or copy
// it into an internal Picture with the encoder's stride and edge padding,
// then append it to the s->input_picture[] queue.
// pic_arg == NULL signals flushing: only the queue shift at the end runs.
// Returns 0 on success or a negative error code.
// NOTE(review): original lines 1138 and 1199 are missing from this
// extraction (the av_log() and av_pix_fmt_get_chroma_sub_sample() call
// openings), so those two statements appear truncated below.
1122  Picture *pic = NULL;
1123  int64_t pts;
1124  int i, display_picture_number = 0, ret;
1125  const int encoding_delay = s->max_b_frames ? s->max_b_frames :
1126  (s->low_delay ? 0 : 1);
1127  int direct = 1;
1128 
1129  if (pic_arg) {
1130  pts = pic_arg->pts;
1131  display_picture_number = s->input_picture_number++;
1132 
// Validate / synthesize the timestamp: user pts must be strictly
// increasing; if absent, extrapolate from the last one or fall back to
// the display picture number.
1133  if (pts != AV_NOPTS_VALUE) {
1134  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1135  int64_t last = s->user_specified_pts;
1136 
1137  if (pts <= last) {
1139  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1140  pts, last);
1141  return AVERROR(EINVAL);
1142  }
1143 
1144  if (!s->low_delay && display_picture_number == 1)
1145  s->dts_delta = pts - last;
1146  }
1147  s->user_specified_pts = pts;
1148  } else {
1149  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1150  s->user_specified_pts =
1151  pts = s->user_specified_pts + 1;
1152  av_log(s->avctx, AV_LOG_INFO,
1153  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1154  pts);
1155  } else {
1156  pts = display_picture_number;
1157  }
1158  }
1159  }
1160 
1161  if (pic_arg) {
// "direct" requires matching buffers/linesizes, 16-aligned dimensions
// and STRIDE_ALIGN-aligned data pointers; otherwise we must copy.
1162  if (!pic_arg->buf[0] ||
1163  pic_arg->linesize[0] != s->linesize ||
1164  pic_arg->linesize[1] != s->uvlinesize ||
1165  pic_arg->linesize[2] != s->uvlinesize)
1166  direct = 0;
1167  if ((s->width & 15) || (s->height & 15))
1168  direct = 0;
1169  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1170  direct = 0;
1171  if (s->linesize & (STRIDE_ALIGN-1))
1172  direct = 0;
1173 
1174  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1175  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1176 
1177  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1178  if (i < 0)
1179  return i;
1180 
1181  pic = &s->picture[i];
1182  pic->reference = 3;
1183 
1184  if (direct) {
1185  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1186  return ret;
1187  }
1188  ret = alloc_picture(s, pic, direct);
1189  if (ret < 0)
1190  return ret;
1191 
1192  if (!direct) {
// If the caller's planes already point INPLACE_OFFSET into our own
// buffer, the data is in place and no copy is needed.
1193  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1194  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1195  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1196  // empty
1197  } else {
1198  int h_chroma_shift, v_chroma_shift;
1200  &h_chroma_shift,
1201  &v_chroma_shift);
1202 
// Plane-by-plane copy into the padded internal buffer.
1203  for (i = 0; i < 3; i++) {
1204  int src_stride = pic_arg->linesize[i];
1205  int dst_stride = i ? s->uvlinesize : s->linesize;
1206  int h_shift = i ? h_chroma_shift : 0;
1207  int v_shift = i ? v_chroma_shift : 0;
1208  int w = s->width >> h_shift;
1209  int h = s->height >> v_shift;
1210  uint8_t *src = pic_arg->data[i];
1211  uint8_t *dst = pic->f->data[i];
1212  int vpad = 16;
1213 
// Interlaced MPEG-2 needs 32-line bottom padding when the coded
// height rounds up by more than 16.
1214  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1215  && !s->progressive_sequence
1216  && FFALIGN(s->height, 32) - s->height > 16)
1217  vpad = 32;
1218 
1219  if (!s->avctx->rc_buffer_size)
1220  dst += INPLACE_OFFSET;
1221 
1222  if (src_stride == dst_stride)
1223  memcpy(dst, src, src_stride * h);
1224  else {
1225  int h2 = h;
1226  uint8_t *dst2 = dst;
1227  while (h2--) {
1228  memcpy(dst2, src, w);
1229  dst2 += dst_stride;
1230  src += src_stride;
1231  }
1232  }
// Replicate the bottom/right edge when dimensions are not a
// multiple of the macroblock/padding size.
1233  if ((s->width & 15) || (s->height & (vpad-1))) {
1234  s->mpvencdsp.draw_edges(dst, dst_stride,
1235  w, h,
1236  16 >> h_shift,
1237  vpad >> v_shift,
1238  EDGE_BOTTOM);
1239  }
1240  }
1241  }
1242  }
1243  ret = av_frame_copy_props(pic->f, pic_arg);
1244  if (ret < 0)
1245  return ret;
1246 
1247  pic->f->display_picture_number = display_picture_number;
1248  pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1249  }
1250 
1251  /* shift buffer entries */
1252  for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1253  s->input_picture[i - 1] = s->input_picture[i];
1254 
// New picture enters at the reorder-delay position (NULL when flushing).
1255  s->input_picture[encoding_delay] = (Picture*) pic;
1256 
1257  return 0;
1258 }
1259 
1260 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1261 {
1262  int x, y, plane;
1263  int score = 0;
1264  int64_t score64 = 0;
1265 
1266  for (plane = 0; plane < 3; plane++) {
1267  const int stride = p->f->linesize[plane];
1268  const int bw = plane ? 1 : 2;
1269  for (y = 0; y < s->mb_height * bw; y++) {
1270  for (x = 0; x < s->mb_width * bw; x++) {
1271  int off = p->shared ? 0 : 16;
1272  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1273  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1274  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1275 
1276  switch (FFABS(s->avctx->frame_skip_exp)) {
1277  case 0: score = FFMAX(score, v); break;
1278  case 1: score += FFABS(v); break;
1279  case 2: score64 += v * (int64_t)v; break;
1280  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1281  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1282  }
1283  }
1284  }
1285  }
1286  emms_c();
1287 
1288  if (score)
1289  score64 = score;
1290  if (s->avctx->frame_skip_exp < 0)
1291  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1292  -1.0/s->avctx->frame_skip_exp);
1293 
1294  if (score64 < s->avctx->frame_skip_threshold)
1295  return 1;
1296  if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1297  return 1;
1298  return 0;
1299 }
1300 
// Encode one frame with the helper codec context and return the number of
// bytes produced (or a negative error code). Used by estimate_best_b_count()
// to probe candidate B-frame layouts.
// NOTE(review): the signature line (orig. 1980s-era line 1301) is missing
// from this extraction; presumably it takes (AVCodecContext *c,
// AVFrame *frame) — the body below references both.
1302 {
1303  AVPacket pkt = { 0 };
1304  int ret, got_output;
1305 
1306  av_init_packet(&pkt);
1307  ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1308  if (ret < 0)
1309  return ret;
1310 
// Report the compressed size; the packet itself is discarded.
1311  ret = pkt.size;
1312  av_free_packet(&pkt);
1313  return ret;
1314 }
1315 
// Brute-force search for the best number of consecutive B-frames: encode the
// queued input pictures (downscaled by brd_scale) with every candidate
// B-frame count j and keep the one with the lowest rate-distortion cost.
// Returns the best count, or a negative error code.
// NOTE(review): several original lines are missing from this extraction
// (1316 signature, 1318-1319 codec/context setup, 1337, 1341, 1347, 1395,
// 1406), so a few statements below appear truncated.
1317 {
1320  const int scale = s->avctx->brd_scale;
1321  int i, j, out_size, p_lambda, b_lambda, lambda2;
1322  int64_t best_rd = INT64_MAX;
1323  int best_b_count = -1;
1324 
1325  if (!c)
1326  return AVERROR(ENOMEM);
1327  av_assert0(scale >= 0 && scale <= 3);
1328 
1329  //emms_c();
1330  //s->next_picture_ptr->quality;
1331  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1332  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1333  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1334  if (!b_lambda) // FIXME we should do this somewhere else
1335  b_lambda = p_lambda;
1336  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1338 
// Configure the probe encoder to mirror the main encoder's settings at
// reduced resolution.
1339  c->width = s->width >> scale;
1340  c->height = s->height >> scale;
1342  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1343  c->mb_decision = s->avctx->mb_decision;
1344  c->me_cmp = s->avctx->me_cmp;
1345  c->mb_cmp = s->avctx->mb_cmp;
1346  c->me_sub_cmp = s->avctx->me_sub_cmp;
1348  c->time_base = s->avctx->time_base;
1349  c->max_b_frames = s->max_b_frames;
1350 
1351  if (avcodec_open2(c, codec, NULL) < 0)
1352  return -1;
1353 
// Downscale the reference plus queued inputs into tmp_frames[].
1354  for (i = 0; i < s->max_b_frames + 2; i++) {
1355  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1356  s->next_picture_ptr;
1357  uint8_t *data[4];
1358 
1359  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1360  pre_input = *pre_input_ptr;
1361  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1362 
1363  if (!pre_input.shared && i) {
1364  data[0] += INPLACE_OFFSET;
1365  data[1] += INPLACE_OFFSET;
1366  data[2] += INPLACE_OFFSET;
1367  }
1368 
1369  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1370  s->tmp_frames[i]->linesize[0],
1371  data[0],
1372  pre_input.f->linesize[0],
1373  c->width, c->height);
1374  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1375  s->tmp_frames[i]->linesize[1],
1376  data[1],
1377  pre_input.f->linesize[1],
1378  c->width >> 1, c->height >> 1);
1379  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1380  s->tmp_frames[i]->linesize[2],
1381  data[2],
1382  pre_input.f->linesize[2],
1383  c->width >> 1, c->height >> 1);
1384  }
1385  }
1386 
// Try every candidate B-frame run length j and accumulate its RD cost.
1387  for (j = 0; j < s->max_b_frames + 1; j++) {
1388  int64_t rd = 0;
1389 
1390  if (!s->input_picture[j])
1391  break;
1392 
1393  c->error[0] = c->error[1] = c->error[2] = 0;
1394 
1396  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1397 
1398  out_size = encode_frame(c, s->tmp_frames[0]);
1399 
1400  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1401 
1402  for (i = 0; i < s->max_b_frames + 1; i++) {
// Every (j+1)-th frame (and the last one) is coded as P.
1403  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1404 
1405  s->tmp_frames[i + 1]->pict_type = is_p ?
1407  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1408 
1409  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1410 
1411  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1412  }
1413 
1414  /* get the delayed frames */
1415  while (out_size) {
1416  out_size = encode_frame(c, NULL);
1417  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1418  }
1419 
1420  rd += c->error[0] + c->error[1] + c->error[2];
1421 
1422  if (rd < best_rd) {
1423  best_rd = rd;
1424  best_b_count = j;
1425  }
1426  }
1427 
1428  avcodec_close(c);
1429  av_freep(&c);
1430 
1431  return best_b_count;
1432 }
1433 
// Choose the next picture to code and its type: possibly skip it (VBV frame
// skipping), pick the number of preceding B-frames according to
// b_frame_strategy, reorder input pictures into coding order, and set up
// s->new_picture / s->current_picture for the encode step.
// NOTE(review): the signature line (orig. 1434) and numerous interior lines
// are missing from this extraction (e.g. 1439, 1444, 1460-1461, 1526, 1535,
// 1546-1548, 1553-1554, 1561-1565, 1579, 1589, 1595, 1600, 1605-1608);
// several statements below are therefore shown truncated.
1435 {
1436  int i, ret;
1437 
1438  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1440  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1441 
1442  /* set next picture type & ordering */
1443  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
// Frame-skip check: drop the frame and only update VBV accounting.
1445  if (s->picture_in_gop_number < s->gop_size &&
1446  s->next_picture_ptr &&
1447  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1448  // FIXME check that te gop check above is +-1 correct
1449  av_frame_unref(s->input_picture[0]->f);
1450 
1451  ff_vbv_update(s, 0);
1452 
1453  goto no_output_pic;
1454  }
1455  }
1456 
1457  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1458  !s->next_picture_ptr || s->intra_only) {
1459  s->reordered_input_picture[0] = s->input_picture[0];
1462  s->coded_picture_number++;
1463  } else {
1464  int b_frames;
1465 
// Pass 2: take picture types from the first-pass rate-control log.
1466  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1467  for (i = 0; i < s->max_b_frames + 1; i++) {
1468  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1469 
1470  if (pict_num >= s->rc_context.num_entries)
1471  break;
1472  if (!s->input_picture[i]) {
1473  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1474  break;
1475  }
1476 
1477  s->input_picture[i]->f->pict_type =
1478  s->rc_context.entry[pict_num].new_pict_type;
1479  }
1480  }
1481 
// Strategy 0: fixed count; 1: intra-count heuristic; 2: brute force.
1482  if (s->avctx->b_frame_strategy == 0) {
1483  b_frames = s->max_b_frames;
1484  while (b_frames && !s->input_picture[b_frames])
1485  b_frames--;
1486  } else if (s->avctx->b_frame_strategy == 1) {
1487  for (i = 1; i < s->max_b_frames + 1; i++) {
1488  if (s->input_picture[i] &&
1489  s->input_picture[i]->b_frame_score == 0) {
1490  s->input_picture[i]->b_frame_score =
1491  get_intra_count(s,
1492  s->input_picture[i ]->f->data[0],
1493  s->input_picture[i - 1]->f->data[0],
1494  s->linesize) + 1;
1495  }
1496  }
1497  for (i = 0; i < s->max_b_frames + 1; i++) {
1498  if (!s->input_picture[i] ||
1499  s->input_picture[i]->b_frame_score - 1 >
1500  s->mb_num / s->avctx->b_sensitivity)
1501  break;
1502  }
1503 
1504  b_frames = FFMAX(0, i - 1);
1505 
1506  /* reset scores */
1507  for (i = 0; i < b_frames + 1; i++) {
1508  s->input_picture[i]->b_frame_score = 0;
1509  }
1510  } else if (s->avctx->b_frame_strategy == 2) {
1511  b_frames = estimate_best_b_count(s);
1512  } else {
1513  av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1514  b_frames = 0;
1515  }
1516 
1517  emms_c();
1518 
// A forced non-B type inside the run shortens the B-frame sequence.
1519  for (i = b_frames - 1; i >= 0; i--) {
1520  int type = s->input_picture[i]->f->pict_type;
1521  if (type && type != AV_PICTURE_TYPE_B)
1522  b_frames = i;
1523  }
1524  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1525  b_frames == s->max_b_frames) {
1527  "warning, too many b frames in a row\n");
1528  }
1529 
// GOP boundary handling: clamp the B-run or force an I-frame.
1530  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1531  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1532  s->gop_size > s->picture_in_gop_number) {
1533  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1534  } else {
1536  b_frames = 0;
1537  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1538  }
1539  }
1540 
1541  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1542  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1543  b_frames--;
1544 
// Reorder: the anchor frame first, then the B-frames that precede it.
1545  s->reordered_input_picture[0] = s->input_picture[b_frames];
1549  s->coded_picture_number++;
1550  for (i = 0; i < b_frames; i++) {
1551  s->reordered_input_picture[i + 1] = s->input_picture[i];
1552  s->reordered_input_picture[i + 1]->f->pict_type =
1555  s->coded_picture_number++;
1556  }
1557  }
1558  }
1559 no_output_pic:
1560  if (s->reordered_input_picture[0]) {
1563  AV_PICTURE_TYPE_B ? 3 : 0;
1564 
1566  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1567  return ret;
1568 
1569  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1570  // input is a shared pix, so we can't modifiy it -> alloc a new
1571  // one & ensure that the shared one is reuseable
1572 
1573  Picture *pic;
1574  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1575  if (i < 0)
1576  return i;
1577  pic = &s->picture[i];
1578 
1580  if (alloc_picture(s, pic, 0) < 0) {
1581  return -1;
1582  }
1583 
1584  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1585  if (ret < 0)
1586  return ret;
1587 
1588  /* mark us unused / free shared pic */
1590  s->reordered_input_picture[0]->shared = 0;
1591 
1592  s->current_picture_ptr = pic;
1593  } else {
1594  // input is not a shared pix -> reuse buffer for current_pix
1596  for (i = 0; i < 4; i++) {
1597  s->new_picture.f->data[i] += INPLACE_OFFSET;
1598  }
1599  }
1601  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1602  s->current_picture_ptr)) < 0)
1603  return ret;
1604 
1606  } else {
1608  }
1609  return 0;
1610 }
1611 
1612 static void frame_end(MpegEncContext *s)
1613 {
// Post-encode bookkeeping: pad the reconstructed picture's borders
// (draw_edges on all three planes, so later motion estimation may read
// outside the picture) and remember the just-coded picture type.
// NOTE(review): several original lines are missing from this extraction
// (1615, 1617, 1620, 1623, 1625, 1632, 1644, 1646-1647, 1649-1651): the
// draw_edges() call openings, part of the condition, the non-B assignment
// and the FF_API_CODED_FRAME body all appear truncated below.
1614  if (s->unrestricted_mv &&
1616  !s->intra_only) {
1618  int hshift = desc->log2_chroma_w;
1619  int vshift = desc->log2_chroma_h;
1621  s->current_picture.f->linesize[0],
1622  s->h_edge_pos, s->v_edge_pos,
1624  EDGE_TOP | EDGE_BOTTOM);
1626  s->current_picture.f->linesize[1],
1627  s->h_edge_pos >> hshift,
1628  s->v_edge_pos >> vshift,
1629  EDGE_WIDTH >> hshift,
1630  EDGE_WIDTH >> vshift,
1631  EDGE_TOP | EDGE_BOTTOM);
1633  s->current_picture.f->linesize[2],
1634  s->h_edge_pos >> hshift,
1635  s->v_edge_pos >> vshift,
1636  EDGE_WIDTH >> hshift,
1637  EDGE_WIDTH >> vshift,
1638  EDGE_TOP | EDGE_BOTTOM);
1639  }
1640 
1641  emms_c();
1642 
// Remember the type of the frame we just coded for the next call.
1643  s->last_pict_type = s->pict_type;
1645  if (s->pict_type!= AV_PICTURE_TYPE_B)
1647 
1648 #if FF_API_CODED_FRAME
1652 #endif
1653 }
1654 
// Update the adaptive noise-reduction tables: decay the accumulated DCT
// error statistics once the sample count exceeds 2^16 (keeping a running
// average), then derive the per-coefficient offsets used by the quantizer.
// NOTE(review): the signature line (orig. 1655) is missing from this
// extraction; the body operates on MpegEncContext *s.
1656 {
1657  int intra, i;
1658 
1659  for (intra = 0; intra < 2; intra++) {
// Halve both the error sums and the count to keep ratios stable while
// bounding the magnitude of the accumulators.
1660  if (s->dct_count[intra] > (1 << 16)) {
1661  for (i = 0; i < 64; i++) {
1662  s->dct_error_sum[intra][i] >>= 1;
1663  }
1664  s->dct_count[intra] >>= 1;
1665  }
1666 
1667  for (i = 0; i < 64; i++) {
// offset = noise_reduction * count / error_sum, with rounding; the
// "+ 1" guards against division by zero when no error accumulated.
1668  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1669  s->dct_count[intra] +
1670  s->dct_error_sum[intra][i] / 2) /
1671  (s->dct_error_sum[intra][i] + 1);
1672  }
1673  }
1674 }
1675 
// Per-frame setup before encoding: rotate last/next/current picture
// references, double linesizes for field pictures, select the quantizer
// matrices for the output format, and refresh noise-reduction tables.
// Returns 0 on success or a negative error code.
// NOTE(review): the signature line (orig. 1676) and many interior lines are
// missing from this extraction (1682, 1684, 1687-1690, 1696, 1698, 1702,
// 1709, 1719, 1729-1738, 1741-1742); several statements below are shown
// truncated.
1677 {
1678  int ret;
1679 
1680  /* mark & release old frames */
1681  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1683  s->last_picture_ptr->f->buf[0]) {
1685  }
1686 
1689 
1691  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1692  s->current_picture_ptr)) < 0)
1693  return ret;
1694 
// Non-B frames advance the reference chain (unless droppable).
1695  if (s->pict_type != AV_PICTURE_TYPE_B) {
1697  if (!s->droppable)
1699  }
1700 
1701  if (s->last_picture_ptr) {
1703  if (s->last_picture_ptr->f->buf[0] &&
1704  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1705  s->last_picture_ptr)) < 0)
1706  return ret;
1707  }
1708  if (s->next_picture_ptr) {
1710  if (s->next_picture_ptr->f->buf[0] &&
1711  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1712  s->next_picture_ptr)) < 0)
1713  return ret;
1714  }
1715 
// Field pictures: step data pointers and double the linesizes so each
// field is addressed as a half-height frame.
1716  if (s->picture_structure!= PICT_FRAME) {
1717  int i;
1718  for (i = 0; i < 4; i++) {
1720  s->current_picture.f->data[i] +=
1721  s->current_picture.f->linesize[i];
1722  }
1723  s->current_picture.f->linesize[i] *= 2;
1724  s->last_picture.f->linesize[i] *= 2;
1725  s->next_picture.f->linesize[i] *= 2;
1726  }
1727  }
1728 
// Select dct_unquantize implementations for the output format.
1729  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1732  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1735  } else {
1738  }
1739 
1740  if (s->dct_error_sum) {
1743  }
1744 
1745  return 0;
1746 }
1747 
// Main per-frame entry point of the MPEG-video encoder: queue the input
// picture, select/reorder the next picture to code, encode it (re-encoding
// at higher lambda on VBV overflow), apply stuffing, update MPEG-1/2
// vbv_delay for CBR, and fill the output packet (pts/dts/flags/size).
// Returns 0 on success (with *got_packet set), negative on error.
// NOTE(review): the opening of the signature (orig. line 1748,
// "int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,") and
// many interior lines are missing from this extraction (1775-1776,
// 1818-1819, 1842, 1858, 1862, 1870-1871, 1927, 1952, 1962); several
// statements below are shown truncated.
1749  const AVFrame *pic_arg, int *got_packet)
1750 {
1751  MpegEncContext *s = avctx->priv_data;
1752  int i, stuffing_count, ret;
1753  int context_count = s->slice_context_count;
1754 
1755  s->vbv_ignore_qmax = 0;
1756 
1757  s->picture_in_gop_number++;
1758 
1759  if (load_input_picture(s, pic_arg) < 0)
1760  return -1;
1761 
1762  if (select_input_picture(s) < 0) {
1763  return -1;
1764  }
1765 
1766  /* output? */
1767  if (s->new_picture.f->data[0]) {
// Single-slice encodes without a caller buffer can grow the internal
// byte buffer instead of pre-sizing for the worst case.
1768  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1769  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1770  :
1771  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1772  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1773  return ret;
1774  if (s->mb_info) {
1777  s->mb_width*s->mb_height*12);
1778  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1779  }
1780 
// Split the packet buffer across slice threads proportionally to the
// macroblock rows each thread encodes.
1781  for (i = 0; i < context_count; i++) {
1782  int start_y = s->thread_context[i]->start_mb_y;
1783  int end_y = s->thread_context[i]-> end_mb_y;
1784  int h = s->mb_height;
1785  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1786  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1787 
1788  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1789  }
1790 
1791  s->pict_type = s->new_picture.f->pict_type;
1792  //emms_c();
1793  ret = frame_start(s);
1794  if (ret < 0)
1795  return ret;
1796 vbv_retry:
1797  ret = encode_picture(s, s->picture_number);
1798  if (growing_buffer) {
1799  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1800  pkt->data = s->pb.buf;
1801  pkt->size = avctx->internal->byte_buffer_size;
1802  }
1803  if (ret < 0)
1804  return -1;
1805 
// Export per-frame bit statistics.
1806  avctx->header_bits = s->header_bits;
1807  avctx->mv_bits = s->mv_bits;
1808  avctx->misc_bits = s->misc_bits;
1809  avctx->i_tex_bits = s->i_tex_bits;
1810  avctx->p_tex_bits = s->p_tex_bits;
1811  avctx->i_count = s->i_count;
1812  // FIXME f/b_count in avctx
1813  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1814  avctx->skip_count = s->skip_count;
1815 
1816  frame_end(s);
1817 
1820 
// VBV compliance: if the frame exceeds the allowed size, raise lambda
// (and the adaptive-quant table), undo per-frame state changes, reset
// the bit writers and retry the encode.
1821  if (avctx->rc_buffer_size) {
1822  RateControlContext *rcc = &s->rc_context;
1823  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1824 
1825  if (put_bits_count(&s->pb) > max_size &&
1826  s->lambda < s->lmax) {
1827  s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1828  (s->qscale + 1) / s->qscale);
1829  if (s->adaptive_quant) {
1830  int i;
1831  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1832  s->lambda_table[i] =
1833  FFMAX(s->lambda_table[i] + 1,
1834  s->lambda_table[i] * (s->qscale + 1) /
1835  s->qscale);
1836  }
1837  s->mb_skipped = 0; // done in frame_start()
1838  // done in encode_picture() so we must undo it
1839  if (s->pict_type == AV_PICTURE_TYPE_P) {
1840  if (s->flipflop_rounding ||
1841  s->codec_id == AV_CODEC_ID_H263P ||
1843  s->no_rounding ^= 1;
1844  }
1845  if (s->pict_type != AV_PICTURE_TYPE_B) {
1846  s->time_base = s->last_time_base;
1847  s->last_non_b_time = s->time - s->pp_time;
1848  }
1849  for (i = 0; i < context_count; i++) {
1850  PutBitContext *pb = &s->thread_context[i]->pb;
1851  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1852  }
1853  s->vbv_ignore_qmax = 1;
1854  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1855  goto vbv_retry;
1856  }
1857 
1859  }
1860 
1861  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1863 
1864  for (i = 0; i < 4; i++) {
1865  s->current_picture_ptr->f->error[i] =
1866  s->current_picture.f->error[i] =
1867  s->current_picture.error[i];
1868  avctx->error[i] += s->current_picture_ptr->f->error[i];
1869  }
1872  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1873  s->pict_type);
1874 
1875  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1876  assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1877  avctx->i_tex_bits + avctx->p_tex_bits ==
1878  put_bits_count(&s->pb));
1879  flush_put_bits(&s->pb);
1880  s->frame_bits = put_bits_count(&s->pb);
1881 
// Append VBV stuffing bytes in the codec-appropriate form.
1882  stuffing_count = ff_vbv_update(s, s->frame_bits);
1883  s->stuffing_bits = 8*stuffing_count;
1884  if (stuffing_count) {
1885  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1886  stuffing_count + 50) {
1887  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1888  return -1;
1889  }
1890 
1891  switch (s->codec_id) {
1894  while (stuffing_count--) {
1895  put_bits(&s->pb, 8, 0);
1896  }
1897  break;
1898  case AV_CODEC_ID_MPEG4:
1899  put_bits(&s->pb, 16, 0);
1900  put_bits(&s->pb, 16, 0x1C3);
1901  stuffing_count -= 4;
1902  while (stuffing_count--) {
1903  put_bits(&s->pb, 8, 0xFF);
1904  }
1905  break;
1906  default:
1907  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1908  }
1909  flush_put_bits(&s->pb);
1910  s->frame_bits = put_bits_count(&s->pb);
1911  }
1912 
1913  /* update mpeg1/2 vbv_delay for CBR */
1914  if (s->avctx->rc_max_rate &&
1915  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1916  s->out_format == FMT_MPEG1 &&
1917  90000LL * (avctx->rc_buffer_size - 1) <=
1918  s->avctx->rc_max_rate * 0xFFFFLL) {
1919  int vbv_delay, min_delay;
1920  double inbits = s->avctx->rc_max_rate *
1921  av_q2d(s->avctx->time_base);
1922  int minbits = s->frame_bits - 8 *
1923  (s->vbv_delay_ptr - s->pb.buf - 1);
1924  double bits = s->rc_context.buffer_index + minbits - inbits;
1925 
1926  if (bits < 0)
1928  "Internal error, negative bits\n");
1929 
1930  assert(s->repeat_first_field == 0);
1931 
1932  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1933  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1934  s->avctx->rc_max_rate;
1935 
1936  vbv_delay = FFMAX(vbv_delay, min_delay);
1937 
1938  av_assert0(vbv_delay < 0xFFFF);
1939 
// Patch the 16-bit vbv_delay field in place in the written header.
1940  s->vbv_delay_ptr[0] &= 0xF8;
1941  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1942  s->vbv_delay_ptr[1] = vbv_delay >> 5;
1943  s->vbv_delay_ptr[2] &= 0x07;
1944  s->vbv_delay_ptr[2] |= vbv_delay << 3;
1945  avctx->vbv_delay = vbv_delay * 300;
1946  }
1947  s->total_bits += s->frame_bits;
1948  avctx->frame_bits = s->frame_bits;
1949 
// Packet timestamps: dts trails pts by one coded non-B frame.
1950  pkt->pts = s->current_picture.f->pts;
1951  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1953  pkt->dts = pkt->pts - s->dts_delta;
1954  else
1955  pkt->dts = s->reordered_pts;
1956  s->reordered_pts = pkt->pts;
1957  } else
1958  pkt->dts = pkt->pts;
1959  if (s->current_picture.f->key_frame)
1960  pkt->flags |= AV_PKT_FLAG_KEY;
1961  if (s->mb_info)
1963  } else {
1964  s->frame_bits = 0;
1965  }
1966 
1967  /* release non-reference frames */
1968  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1969  if (!s->picture[i].reference)
1970  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1971  }
1972 
1973  av_assert1((s->frame_bits & 7) == 0);
1974 
1975  pkt->size = s->frame_bits / 8;
1976  *got_packet = !!pkt->size;
1977  return 0;
1978 }
1979 
// Eliminate whole 8x8 blocks that contain only a few isolated |level|==1
// coefficients whose coding cost outweighs their visual benefit: score each
// such coefficient by its preceding zero-run (tab[]), and if the total score
// stays below the threshold, zero the block (except possibly the DC term).
// A negative threshold means the DC coefficient may also be eliminated.
// NOTE(review): the opening of the signature (orig. line 1980,
// "static void dct_single_coeff_elimination(MpegEncContext *s,") is missing
// from this extraction.
1981  int n, int threshold)
1982 {
// Cost weights indexed by the zero-run preceding a +-1 coefficient:
// early (low-frequency) isolated ones are cheap to keep, so they score
// higher and protect the block from elimination.
1983  static const char tab[64] = {
1984  3, 2, 2, 1, 1, 1, 1, 1,
1985  1, 1, 1, 1, 1, 1, 1, 1,
1986  1, 1, 1, 1, 1, 1, 1, 1,
1987  0, 0, 0, 0, 0, 0, 0, 0,
1988  0, 0, 0, 0, 0, 0, 0, 0,
1989  0, 0, 0, 0, 0, 0, 0, 0,
1990  0, 0, 0, 0, 0, 0, 0, 0,
1991  0, 0, 0, 0, 0, 0, 0, 0
1992  };
1993  int score = 0;
1994  int run = 0;
1995  int i;
1996  int16_t *block = s->block[n];
1997  const int last_index = s->block_last_index[n];
1998  int skip_dc;
1999 
2000  if (threshold < 0) {
2001  skip_dc = 0;
2002  threshold = -threshold;
2003  } else
2004  skip_dc = 1;
2005 
2006  /* Are all we could set to zero already zero? */
2007  if (last_index <= skip_dc - 1)
2008  return;
2009 
// Scan in zig-zag order; any |level| > 1 disqualifies the block.
2010  for (i = 0; i <= last_index; i++) {
2011  const int j = s->intra_scantable.permutated[i];
2012  const int level = FFABS(block[j]);
2013  if (level == 1) {
2014  if (skip_dc && i == 0)
2015  continue;
2016  score += tab[run];
2017  run = 0;
2018  } else if (level > 1) {
2019  return;
2020  } else {
2021  run++;
2022  }
2023  }
2024  if (score >= threshold)
2025  return;
// Cheap enough to drop: zero everything (from DC or AC1 onward).
2026  for (i = skip_dc; i <= last_index; i++) {
2027  const int j = s->intra_scantable.permutated[i];
2028  block[j] = 0;
2029  }
// Keep last_index consistent: 0 if a DC term survives, -1 if empty.
2030  if (block[0])
2031  s->block_last_index[n] = 0;
2032  else
2033  s->block_last_index[n] = -1;
2034 }
2035 
2036 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2037  int last_index)
2038 {
2039  int i;
2040  const int maxlevel = s->max_qcoeff;
2041  const int minlevel = s->min_qcoeff;
2042  int overflow = 0;
2043 
2044  if (s->mb_intra) {
2045  i = 1; // skip clipping of intra dc
2046  } else
2047  i = 0;
2048 
2049  for (; i <= last_index; i++) {
2050  const int j = s->intra_scantable.permutated[i];
2051  int level = block[j];
2052 
2053  if (level > maxlevel) {
2054  level = maxlevel;
2055  overflow++;
2056  } else if (level < minlevel) {
2057  level = minlevel;
2058  overflow++;
2059  }
2060 
2061  block[j] = level;
2062  }
2063 
2064  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2065  av_log(s->avctx, AV_LOG_INFO,
2066  "warning, clipping %d dct coefficients to %d..%d\n",
2067  overflow, minlevel, maxlevel);
2068 }
2069 
/**
 * Compute a per-pixel visual-activity weight for an 8x8 block: for each
 * pixel, take the (edge-clipped) 3x3 neighbourhood and store a scaled
 * standard-deviation measure, 36*sqrt(count*sum(v^2) - sum(v)^2)/count.
 * Used by the noise-shaping quantizer.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int row, col;
    // FIXME optimize
    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            const int y0 = FFMAX(row - 1, 0), y1 = FFMIN(8, row + 2);
            const int x0 = FFMAX(col - 1, 0), x1 = FFMIN(8, col + 2);
            int sum = 0, sqr = 0, count = 0;
            int yy, xx;

            for (yy = y0; yy < y1; yy++) {
                for (xx = x0; xx < x1; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            weight[col + 8 * row] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2093 
2095  int motion_x, int motion_y,
2096  int mb_block_height,
2097  int mb_block_width,
2098  int mb_block_count)
2099 {
2100  int16_t weight[12][64];
2101  int16_t orig[12][64];
2102  const int mb_x = s->mb_x;
2103  const int mb_y = s->mb_y;
2104  int i;
2105  int skip_dct[12];
2106  int dct_offset = s->linesize * 8; // default for progressive frames
2107  int uv_dct_offset = s->uvlinesize * 8;
2108  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2109  ptrdiff_t wrap_y, wrap_c;
2110 
2111  for (i = 0; i < mb_block_count; i++)
2112  skip_dct[i] = s->skipdct;
2113 
2114  if (s->adaptive_quant) {
2115  const int last_qp = s->qscale;
2116  const int mb_xy = mb_x + mb_y * s->mb_stride;
2117 
2118  s->lambda = s->lambda_table[mb_xy];
2119  update_qscale(s);
2120 
2121  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2122  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2123  s->dquant = s->qscale - last_qp;
2124 
2125  if (s->out_format == FMT_H263) {
2126  s->dquant = av_clip(s->dquant, -2, 2);
2127 
2128  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2129  if (!s->mb_intra) {
2130  if (s->pict_type == AV_PICTURE_TYPE_B) {
2131  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2132  s->dquant = 0;
2133  }
2134  if (s->mv_type == MV_TYPE_8X8)
2135  s->dquant = 0;
2136  }
2137  }
2138  }
2139  }
2140  ff_set_qscale(s, last_qp + s->dquant);
2141  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2142  ff_set_qscale(s, s->qscale + s->dquant);
2143 
2144  wrap_y = s->linesize;
2145  wrap_c = s->uvlinesize;
2146  ptr_y = s->new_picture.f->data[0] +
2147  (mb_y * 16 * wrap_y) + mb_x * 16;
2148  ptr_cb = s->new_picture.f->data[1] +
2149  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2150  ptr_cr = s->new_picture.f->data[2] +
2151  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2152 
2153  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2154  uint8_t *ebuf = s->sc.edge_emu_buffer + 36 * wrap_y;
2155  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2156  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2157  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2158  wrap_y, wrap_y,
2159  16, 16, mb_x * 16, mb_y * 16,
2160  s->width, s->height);
2161  ptr_y = ebuf;
2162  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2163  wrap_c, wrap_c,
2164  mb_block_width, mb_block_height,
2165  mb_x * mb_block_width, mb_y * mb_block_height,
2166  cw, ch);
2167  ptr_cb = ebuf + 16 * wrap_y;
2168  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2169  wrap_c, wrap_c,
2170  mb_block_width, mb_block_height,
2171  mb_x * mb_block_width, mb_y * mb_block_height,
2172  cw, ch);
2173  ptr_cr = ebuf + 16 * wrap_y + 16;
2174  }
2175 
2176  if (s->mb_intra) {
2178  int progressive_score, interlaced_score;
2179 
2180  s->interlaced_dct = 0;
2181  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2182  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2183  NULL, wrap_y, 8) - 400;
2184 
2185  if (progressive_score > 0) {
2186  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2187  NULL, wrap_y * 2, 8) +
2188  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2189  NULL, wrap_y * 2, 8);
2190  if (progressive_score > interlaced_score) {
2191  s->interlaced_dct = 1;
2192 
2193  dct_offset = wrap_y;
2194  uv_dct_offset = wrap_c;
2195  wrap_y <<= 1;
2196  if (s->chroma_format == CHROMA_422 ||
2197  s->chroma_format == CHROMA_444)
2198  wrap_c <<= 1;
2199  }
2200  }
2201  }
2202 
2203  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2204  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2205  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2206  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2207 
2208  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2209  skip_dct[4] = 1;
2210  skip_dct[5] = 1;
2211  } else {
2212  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2213  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2214  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2215  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2216  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2217  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2218  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2219  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2220  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2221  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2222  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2223  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2224  }
2225  }
2226  } else {
2227  op_pixels_func (*op_pix)[4];
2228  qpel_mc_func (*op_qpix)[16];
2229  uint8_t *dest_y, *dest_cb, *dest_cr;
2230 
2231  dest_y = s->dest[0];
2232  dest_cb = s->dest[1];
2233  dest_cr = s->dest[2];
2234 
2235  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2236  op_pix = s->hdsp.put_pixels_tab;
2237  op_qpix = s->qdsp.put_qpel_pixels_tab;
2238  } else {
2239  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2240  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2241  }
2242 
2243  if (s->mv_dir & MV_DIR_FORWARD) {
2244  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2245  s->last_picture.f->data,
2246  op_pix, op_qpix);
2247  op_pix = s->hdsp.avg_pixels_tab;
2248  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2249  }
2250  if (s->mv_dir & MV_DIR_BACKWARD) {
2251  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2252  s->next_picture.f->data,
2253  op_pix, op_qpix);
2254  }
2255 
2257  int progressive_score, interlaced_score;
2258 
2259  s->interlaced_dct = 0;
2260  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2261  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2262  ptr_y + wrap_y * 8,
2263  wrap_y, 8) - 400;
2264 
2265  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2266  progressive_score -= 400;
2267 
2268  if (progressive_score > 0) {
2269  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2270  wrap_y * 2, 8) +
2271  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2272  ptr_y + wrap_y,
2273  wrap_y * 2, 8);
2274 
2275  if (progressive_score > interlaced_score) {
2276  s->interlaced_dct = 1;
2277 
2278  dct_offset = wrap_y;
2279  uv_dct_offset = wrap_c;
2280  wrap_y <<= 1;
2281  if (s->chroma_format == CHROMA_422)
2282  wrap_c <<= 1;
2283  }
2284  }
2285  }
2286 
2287  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2288  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2289  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2290  dest_y + dct_offset, wrap_y);
2291  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2292  dest_y + dct_offset + 8, wrap_y);
2293 
2294  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2295  skip_dct[4] = 1;
2296  skip_dct[5] = 1;
2297  } else {
2298  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2299  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2300  if (!s->chroma_y_shift) { /* 422 */
2301  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2302  dest_cb + uv_dct_offset, wrap_c);
2303  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2304  dest_cr + uv_dct_offset, wrap_c);
2305  }
2306  }
2307  /* pre quantization */
2308  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2309  2 * s->qscale * s->qscale) {
2310  // FIXME optimize
2311  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2312  skip_dct[0] = 1;
2313  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2314  skip_dct[1] = 1;
2315  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2316  wrap_y, 8) < 20 * s->qscale)
2317  skip_dct[2] = 1;
2318  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2319  wrap_y, 8) < 20 * s->qscale)
2320  skip_dct[3] = 1;
2321  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2322  skip_dct[4] = 1;
2323  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2324  skip_dct[5] = 1;
2325  if (!s->chroma_y_shift) { /* 422 */
2326  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2327  dest_cb + uv_dct_offset,
2328  wrap_c, 8) < 20 * s->qscale)
2329  skip_dct[6] = 1;
2330  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2331  dest_cr + uv_dct_offset,
2332  wrap_c, 8) < 20 * s->qscale)
2333  skip_dct[7] = 1;
2334  }
2335  }
2336  }
2337 
2338  if (s->quantizer_noise_shaping) {
2339  if (!skip_dct[0])
2340  get_visual_weight(weight[0], ptr_y , wrap_y);
2341  if (!skip_dct[1])
2342  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2343  if (!skip_dct[2])
2344  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2345  if (!skip_dct[3])
2346  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2347  if (!skip_dct[4])
2348  get_visual_weight(weight[4], ptr_cb , wrap_c);
2349  if (!skip_dct[5])
2350  get_visual_weight(weight[5], ptr_cr , wrap_c);
2351  if (!s->chroma_y_shift) { /* 422 */
2352  if (!skip_dct[6])
2353  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2354  wrap_c);
2355  if (!skip_dct[7])
2356  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2357  wrap_c);
2358  }
2359  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2360  }
2361 
2362  /* DCT & quantize */
2363  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2364  {
2365  for (i = 0; i < mb_block_count; i++) {
2366  if (!skip_dct[i]) {
2367  int overflow;
2368  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2369  // FIXME we could decide to change to quantizer instead of
2370  // clipping
2371  // JS: I don't think that would be a good idea it could lower
2372  // quality instead of improve it. Just INTRADC clipping
2373  // deserves changes in quantizer
2374  if (overflow)
2375  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2376  } else
2377  s->block_last_index[i] = -1;
2378  }
2379  if (s->quantizer_noise_shaping) {
2380  for (i = 0; i < mb_block_count; i++) {
2381  if (!skip_dct[i]) {
2382  s->block_last_index[i] =
2383  dct_quantize_refine(s, s->block[i], weight[i],
2384  orig[i], i, s->qscale);
2385  }
2386  }
2387  }
2388 
2389  if (s->luma_elim_threshold && !s->mb_intra)
2390  for (i = 0; i < 4; i++)
2392  if (s->chroma_elim_threshold && !s->mb_intra)
2393  for (i = 4; i < mb_block_count; i++)
2395 
2396  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2397  for (i = 0; i < mb_block_count; i++) {
2398  if (s->block_last_index[i] == -1)
2399  s->coded_score[i] = INT_MAX / 256;
2400  }
2401  }
2402  }
2403 
2404  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2405  s->block_last_index[4] =
2406  s->block_last_index[5] = 0;
2407  s->block[4][0] =
2408  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2409  if (!s->chroma_y_shift) { /* 422 / 444 */
2410  for (i=6; i<12; i++) {
2411  s->block_last_index[i] = 0;
2412  s->block[i][0] = s->block[4][0];
2413  }
2414  }
2415  }
2416 
2417  // non c quantize code returns incorrect block_last_index FIXME
2418  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2419  for (i = 0; i < mb_block_count; i++) {
2420  int j;
2421  if (s->block_last_index[i] > 0) {
2422  for (j = 63; j > 0; j--) {
2423  if (s->block[i][s->intra_scantable.permutated[j]])
2424  break;
2425  }
2426  s->block_last_index[i] = j;
2427  }
2428  }
2429  }
2430 
2431  /* huffman encode */
2432  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2436  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2437  break;
2438  case AV_CODEC_ID_MPEG4:
2440  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2441  break;
2442  case AV_CODEC_ID_MSMPEG4V2:
2443  case AV_CODEC_ID_MSMPEG4V3:
2444  case AV_CODEC_ID_WMV1:
2446  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2447  break;
2448  case AV_CODEC_ID_WMV2:
2449  if (CONFIG_WMV2_ENCODER)
2450  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2451  break;
2452  case AV_CODEC_ID_H261:
2453  if (CONFIG_H261_ENCODER)
2454  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2455  break;
2456  case AV_CODEC_ID_H263:
2457  case AV_CODEC_ID_H263P:
2458  case AV_CODEC_ID_FLV1:
2459  case AV_CODEC_ID_RV10:
2460  case AV_CODEC_ID_RV20:
2461  if (CONFIG_H263_ENCODER)
2462  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2463  break;
2464  case AV_CODEC_ID_MJPEG:
2465  case AV_CODEC_ID_AMV:
2467  ff_mjpeg_encode_mb(s, s->block);
2468  break;
2469  default:
2470  av_assert1(0);
2471  }
2472 }
2473 
2474 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2475 {
2476  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2477  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2478  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2479 }
2480 
2482  int i;
2483 
2484  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2485 
2486  /* mpeg1 */
2487  d->mb_skip_run= s->mb_skip_run;
2488  for(i=0; i<3; i++)
2489  d->last_dc[i] = s->last_dc[i];
2490 
2491  /* statistics */
2492  d->mv_bits= s->mv_bits;
2493  d->i_tex_bits= s->i_tex_bits;
2494  d->p_tex_bits= s->p_tex_bits;
2495  d->i_count= s->i_count;
2496  d->f_count= s->f_count;
2497  d->b_count= s->b_count;
2498  d->skip_count= s->skip_count;
2499  d->misc_bits= s->misc_bits;
2500  d->last_bits= 0;
2501 
2502  d->mb_skipped= 0;
2503  d->qscale= s->qscale;
2504  d->dquant= s->dquant;
2505 
2507 }
2508 
2510  int i;
2511 
2512  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2513  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2514 
2515  /* mpeg1 */
2516  d->mb_skip_run= s->mb_skip_run;
2517  for(i=0; i<3; i++)
2518  d->last_dc[i] = s->last_dc[i];
2519 
2520  /* statistics */
2521  d->mv_bits= s->mv_bits;
2522  d->i_tex_bits= s->i_tex_bits;
2523  d->p_tex_bits= s->p_tex_bits;
2524  d->i_count= s->i_count;
2525  d->f_count= s->f_count;
2526  d->b_count= s->b_count;
2527  d->skip_count= s->skip_count;
2528  d->misc_bits= s->misc_bits;
2529 
2530  d->mb_intra= s->mb_intra;
2531  d->mb_skipped= s->mb_skipped;
2532  d->mv_type= s->mv_type;
2533  d->mv_dir= s->mv_dir;
2534  d->pb= s->pb;
2535  if(s->data_partitioning){
2536  d->pb2= s->pb2;
2537  d->tex_pb= s->tex_pb;
2538  }
2539  d->block= s->block;
2540  for(i=0; i<8; i++)
2541  d->block_last_index[i]= s->block_last_index[i];
2543  d->qscale= s->qscale;
2544 
2546 }
2547 
2548 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2550  int *dmin, int *next_block, int motion_x, int motion_y)
2551 {
2552  int score;
2553  uint8_t *dest_backup[3];
2554 
2555  copy_context_before_encode(s, backup, type);
2556 
2557  s->block= s->blocks[*next_block];
2558  s->pb= pb[*next_block];
2559  if(s->data_partitioning){
2560  s->pb2 = pb2 [*next_block];
2561  s->tex_pb= tex_pb[*next_block];
2562  }
2563 
2564  if(*next_block){
2565  memcpy(dest_backup, s->dest, sizeof(s->dest));
2566  s->dest[0] = s->sc.rd_scratchpad;
2567  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2568  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2569  av_assert0(s->linesize >= 32); //FIXME
2570  }
2571 
2572  encode_mb(s, motion_x, motion_y);
2573 
2574  score= put_bits_count(&s->pb);
2575  if(s->data_partitioning){
2576  score+= put_bits_count(&s->pb2);
2577  score+= put_bits_count(&s->tex_pb);
2578  }
2579 
2580  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2581  ff_mpv_decode_mb(s, s->block);
2582 
2583  score *= s->lambda2;
2584  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2585  }
2586 
2587  if(*next_block){
2588  memcpy(s->dest, dest_backup, sizeof(s->dest));
2589  }
2590 
2591  if(score<*dmin){
2592  *dmin= score;
2593  *next_block^=1;
2594 
2595  copy_context_after_encode(best, s, type);
2596  }
2597 }
2598 
2599 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2600  uint32_t *sq = ff_square_tab + 256;
2601  int acc=0;
2602  int x,y;
2603 
2604  if(w==16 && h==16)
2605  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2606  else if(w==8 && h==8)
2607  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2608 
2609  for(y=0; y<h; y++){
2610  for(x=0; x<w; x++){
2611  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2612  }
2613  }
2614 
2615  av_assert2(acc>=0);
2616 
2617  return acc;
2618 }
2619 
2620 static int sse_mb(MpegEncContext *s){
2621  int w= 16;
2622  int h= 16;
2623 
2624  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2625  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2626 
2627  if(w==16 && h==16)
2628  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2629  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2630  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2631  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2632  }else{
2633  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2634  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2635  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2636  }
2637  else
2638  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2639  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2640  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2641 }
2642 
2644  MpegEncContext *s= *(void**)arg;
2645 
2646 
2647  s->me.pre_pass=1;
2648  s->me.dia_size= s->avctx->pre_dia_size;
2649  s->first_slice_line=1;
2650  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2651  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2653  }
2654  s->first_slice_line=0;
2655  }
2656 
2657  s->me.pre_pass=0;
2658 
2659  return 0;
2660 }
2661 
2663  MpegEncContext *s= *(void**)arg;
2664 
2666 
2667  s->me.dia_size= s->avctx->dia_size;
2668  s->first_slice_line=1;
2669  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2670  s->mb_x=0; //for block init below
2672  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2673  s->block_index[0]+=2;
2674  s->block_index[1]+=2;
2675  s->block_index[2]+=2;
2676  s->block_index[3]+=2;
2677 
2678  /* compute motion vector & mb_type and store in context */
2681  else
2683  }
2684  s->first_slice_line=0;
2685  }
2686  return 0;
2687 }
2688 
/* Slice worker: compute the spatial variance and mean of every 16x16 luma
 * macroblock of the frame to be encoded, storing them per-MB in
 * current_picture and accumulating the variance into s->me.mb_var_sum_temp. */
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y;


    for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xx = mb_x * 16;
            int yy = mb_y * 16;
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
            int varc;
            /* sum of the 256 pixel values of the block */
            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            /* variance ~ E[x^2] - E[x]^2 in fixed point: pix_norm1() is the
             * sum of squared pixels; (sum*sum)>>8 is 256*mean^2; the +500+128
             * terms are rounding bias before the final >>8 normalization. */
            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
            s->me.mb_var_sum_temp += varc;
        }
    }
    return 0;
}
2713 
2716  if(s->partitioned_frame){
2718  }
2719 
2720  ff_mpeg4_stuffing(&s->pb);
2721  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2723  }
2724 
2726  flush_put_bits(&s->pb);
2727 
2728  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2729  s->misc_bits+= get_bits_diff(s);
2730 }
2731 
2733 {
2734  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2735  int offset = put_bits_count(&s->pb);
2736  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2737  int gobn = s->mb_y / s->gob_index;
2738  int pred_x, pred_y;
2739  if (CONFIG_H263_ENCODER)
2740  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2741  bytestream_put_le32(&ptr, offset);
2742  bytestream_put_byte(&ptr, s->qscale);
2743  bytestream_put_byte(&ptr, gobn);
2744  bytestream_put_le16(&ptr, mba);
2745  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2746  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2747  /* 4MV not implemented */
2748  bytestream_put_byte(&ptr, 0); /* hmv2 */
2749  bytestream_put_byte(&ptr, 0); /* vmv2 */
2750 }
2751 
2752 static void update_mb_info(MpegEncContext *s, int startcode)
2753 {
2754  if (!s->mb_info)
2755  return;
2756  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2757  s->mb_info_size += 12;
2758  s->prev_mb_info = s->last_mb_info;
2759  }
2760  if (startcode) {
2761  s->prev_mb_info = put_bits_count(&s->pb)/8;
2762  /* This might have incremented mb_info_size above, and we return without
2763  * actually writing any info into that slot yet. But in that case,
2764  * this will be called again at the start of the after writing the
2765  * start code, actually writing the mb info. */
2766  return;
2767  }
2768 
2769  s->last_mb_info = put_bits_count(&s->pb)/8;
2770  if (!s->mb_info_size)
2771  s->mb_info_size += 12;
2772  write_mb_info(s);
2773 }
2774 
2775 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2776 {
2777  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2778  && s->slice_context_count == 1
2779  && s->pb.buf == s->avctx->internal->byte_buffer) {
2780  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2781  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2782 
2783  uint8_t *new_buffer = NULL;
2784  int new_buffer_size = 0;
2785 
2786  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2787  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2788  return AVERROR(ENOMEM);
2789  }
2790 
2791  emms_c();
2792 
2793  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2794  s->avctx->internal->byte_buffer_size + size_increase);
2795  if (!new_buffer)
2796  return AVERROR(ENOMEM);
2797 
2798  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2800  s->avctx->internal->byte_buffer = new_buffer;
2801  s->avctx->internal->byte_buffer_size = new_buffer_size;
2802  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2803  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2804  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2805  }
2806  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2807  return AVERROR(EINVAL);
2808  return 0;
2809 }
2810 
2811 static int encode_thread(AVCodecContext *c, void *arg){
2812  MpegEncContext *s= *(void**)arg;
2813  int mb_x, mb_y, pdif = 0;
2814  int chr_h= 16>>s->chroma_y_shift;
2815  int i, j;
2816  MpegEncContext best_s = { 0 }, backup_s;
2817  uint8_t bit_buf[2][MAX_MB_BYTES];
2818  uint8_t bit_buf2[2][MAX_MB_BYTES];
2819  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2820  PutBitContext pb[2], pb2[2], tex_pb[2];
2821 
2823 
2824  for(i=0; i<2; i++){
2825  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2826  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2827  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2828  }
2829 
2830  s->last_bits= put_bits_count(&s->pb);
2831  s->mv_bits=0;
2832  s->misc_bits=0;
2833  s->i_tex_bits=0;
2834  s->p_tex_bits=0;
2835  s->i_count=0;
2836  s->f_count=0;
2837  s->b_count=0;
2838  s->skip_count=0;
2839 
2840  for(i=0; i<3; i++){
2841  /* init last dc values */
2842  /* note: quant matrix value (8) is implied here */
2843  s->last_dc[i] = 128 << s->intra_dc_precision;
2844 
2845  s->current_picture.error[i] = 0;
2846  }
2847  if(s->codec_id==AV_CODEC_ID_AMV){
2848  s->last_dc[0] = 128*8/13;
2849  s->last_dc[1] = 128*8/14;
2850  s->last_dc[2] = 128*8/14;
2851  }
2852  s->mb_skip_run = 0;
2853  memset(s->last_mv, 0, sizeof(s->last_mv));
2854 
2855  s->last_mv_dir = 0;
2856 
2857  switch(s->codec_id){
2858  case AV_CODEC_ID_H263:
2859  case AV_CODEC_ID_H263P:
2860  case AV_CODEC_ID_FLV1:
2861  if (CONFIG_H263_ENCODER)
2862  s->gob_index = H263_GOB_HEIGHT(s->height);
2863  break;
2864  case AV_CODEC_ID_MPEG4:
2867  break;
2868  }
2869 
2870  s->resync_mb_x=0;
2871  s->resync_mb_y=0;
2872  s->first_slice_line = 1;
2873  s->ptr_lastgob = s->pb.buf;
2874  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2875  s->mb_x=0;
2876  s->mb_y= mb_y;
2877 
2878  ff_set_qscale(s, s->qscale);
2880 
2881  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2882  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2883  int mb_type= s->mb_type[xy];
2884 // int d;
2885  int dmin= INT_MAX;
2886  int dir;
2887  int size_increase = s->avctx->internal->byte_buffer_size/4
2888  + s->mb_width*MAX_MB_BYTES;
2889 
2890  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2891  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2892  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2893  return -1;
2894  }
2895  if(s->data_partitioning){
2896  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2897  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2898  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2899  return -1;
2900  }
2901  }
2902 
2903  s->mb_x = mb_x;
2904  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2906 
2909  xy= s->mb_y*s->mb_stride + s->mb_x;
2910  mb_type= s->mb_type[xy];
2911  }
2912 
2913  /* write gob / video packet header */
2914  if(s->rtp_mode){
2915  int current_packet_size, is_gob_start;
2916 
2917  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2918 
2919  is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2920 
2921  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2922 
2923  switch(s->codec_id){
2924  case AV_CODEC_ID_H263:
2925  case AV_CODEC_ID_H263P:
2926  if(!s->h263_slice_structured)
2927  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2928  break;
2930  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2932  if(s->mb_skip_run) is_gob_start=0;
2933  break;
2934  case AV_CODEC_ID_MJPEG:
2935  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2936  break;
2937  }
2938 
2939  if(is_gob_start){
2940  if(s->start_mb_y != mb_y || mb_x!=0){
2941  write_slice_end(s);
2942 
2945  }
2946  }
2947 
2948  av_assert2((put_bits_count(&s->pb)&7) == 0);
2949  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2950 
2951  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2952  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2953  int d = 100 / s->error_rate;
2954  if(r % d == 0){
2955  current_packet_size=0;
2956  s->pb.buf_ptr= s->ptr_lastgob;
2957  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2958  }
2959  }
2960 
2961  if (s->avctx->rtp_callback){
2962  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2963  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2964  }
2965  update_mb_info(s, 1);
2966 
2967  switch(s->codec_id){
2968  case AV_CODEC_ID_MPEG4:
2969  if (CONFIG_MPEG4_ENCODER) {
2972  }
2973  break;
2979  }
2980  break;
2981  case AV_CODEC_ID_H263:
2982  case AV_CODEC_ID_H263P:
2983  if (CONFIG_H263_ENCODER)
2984  ff_h263_encode_gob_header(s, mb_y);
2985  break;
2986  }
2987 
2988  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
2989  int bits= put_bits_count(&s->pb);
2990  s->misc_bits+= bits - s->last_bits;
2991  s->last_bits= bits;
2992  }
2993 
2994  s->ptr_lastgob += current_packet_size;
2995  s->first_slice_line=1;
2996  s->resync_mb_x=mb_x;
2997  s->resync_mb_y=mb_y;
2998  }
2999  }
3000 
3001  if( (s->resync_mb_x == s->mb_x)
3002  && s->resync_mb_y+1 == s->mb_y){
3003  s->first_slice_line=0;
3004  }
3005 
3006  s->mb_skipped=0;
3007  s->dquant=0; //only for QP_RD
3008 
3009  update_mb_info(s, 0);
3010 
3011  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3012  int next_block=0;
3013  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3014 
3015  copy_context_before_encode(&backup_s, s, -1);
3016  backup_s.pb= s->pb;
3019  if(s->data_partitioning){
3020  backup_s.pb2= s->pb2;
3021  backup_s.tex_pb= s->tex_pb;
3022  }
3023 
3024  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3025  s->mv_dir = MV_DIR_FORWARD;
3026  s->mv_type = MV_TYPE_16X16;
3027  s->mb_intra= 0;
3028  s->mv[0][0][0] = s->p_mv_table[xy][0];
3029  s->mv[0][0][1] = s->p_mv_table[xy][1];
3030  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3031  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3032  }
3033  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3034  s->mv_dir = MV_DIR_FORWARD;
3035  s->mv_type = MV_TYPE_FIELD;
3036  s->mb_intra= 0;
3037  for(i=0; i<2; i++){
3038  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3039  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3040  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3041  }
3042  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3043  &dmin, &next_block, 0, 0);
3044  }
3045  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3046  s->mv_dir = MV_DIR_FORWARD;
3047  s->mv_type = MV_TYPE_16X16;
3048  s->mb_intra= 0;
3049  s->mv[0][0][0] = 0;
3050  s->mv[0][0][1] = 0;
3051  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3052  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3053  }
3054  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3055  s->mv_dir = MV_DIR_FORWARD;
3056  s->mv_type = MV_TYPE_8X8;
3057  s->mb_intra= 0;
3058  for(i=0; i<4; i++){
3059  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3060  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3061  }
3062  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3063  &dmin, &next_block, 0, 0);
3064  }
3065  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3066  s->mv_dir = MV_DIR_FORWARD;
3067  s->mv_type = MV_TYPE_16X16;
3068  s->mb_intra= 0;
3069  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3070  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3071  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3072  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3073  }
3074  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3075  s->mv_dir = MV_DIR_BACKWARD;
3076  s->mv_type = MV_TYPE_16X16;
3077  s->mb_intra= 0;
3078  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3079  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3080  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3081  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3082  }
3083  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3085  s->mv_type = MV_TYPE_16X16;
3086  s->mb_intra= 0;
3087  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3088  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3089  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3090  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3091  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3092  &dmin, &next_block, 0, 0);
3093  }
3094  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3095  s->mv_dir = MV_DIR_FORWARD;
3096  s->mv_type = MV_TYPE_FIELD;
3097  s->mb_intra= 0;
3098  for(i=0; i<2; i++){
3099  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3100  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3101  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3102  }
3103  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3104  &dmin, &next_block, 0, 0);
3105  }
3106  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3107  s->mv_dir = MV_DIR_BACKWARD;
3108  s->mv_type = MV_TYPE_FIELD;
3109  s->mb_intra= 0;
3110  for(i=0; i<2; i++){
3111  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3112  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3113  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3114  }
3115  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3116  &dmin, &next_block, 0, 0);
3117  }
3118  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3120  s->mv_type = MV_TYPE_FIELD;
3121  s->mb_intra= 0;
3122  for(dir=0; dir<2; dir++){
3123  for(i=0; i<2; i++){
3124  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3125  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3126  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3127  }
3128  }
3129  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3130  &dmin, &next_block, 0, 0);
3131  }
3132  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3133  s->mv_dir = 0;
3134  s->mv_type = MV_TYPE_16X16;
3135  s->mb_intra= 1;
3136  s->mv[0][0][0] = 0;
3137  s->mv[0][0][1] = 0;
3138  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3139  &dmin, &next_block, 0, 0);
3140  if(s->h263_pred || s->h263_aic){
3141  if(best_s.mb_intra)
3142  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3143  else
3144  ff_clean_intra_table_entries(s); //old mode?
3145  }
3146  }
3147 
3148  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3149  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3150  const int last_qp= backup_s.qscale;
3151  int qpi, qp, dc[6];
3152  int16_t ac[6][16];
3153  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3154  static const int dquant_tab[4]={-1,1,-2,2};
3155  int storecoefs = s->mb_intra && s->dc_val[0];
3156 
3157  av_assert2(backup_s.dquant == 0);
3158 
3159  //FIXME intra
3160  s->mv_dir= best_s.mv_dir;
3161  s->mv_type = MV_TYPE_16X16;
3162  s->mb_intra= best_s.mb_intra;
3163  s->mv[0][0][0] = best_s.mv[0][0][0];
3164  s->mv[0][0][1] = best_s.mv[0][0][1];
3165  s->mv[1][0][0] = best_s.mv[1][0][0];
3166  s->mv[1][0][1] = best_s.mv[1][0][1];
3167 
3168  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3169  for(; qpi<4; qpi++){
3170  int dquant= dquant_tab[qpi];
3171  qp= last_qp + dquant;
3172  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3173  continue;
3174  backup_s.dquant= dquant;
3175  if(storecoefs){
3176  for(i=0; i<6; i++){
3177  dc[i]= s->dc_val[0][ s->block_index[i] ];
3178  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3179  }
3180  }
3181 
3182  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3183  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3184  if(best_s.qscale != qp){
3185  if(storecoefs){
3186  for(i=0; i<6; i++){
3187  s->dc_val[0][ s->block_index[i] ]= dc[i];
3188  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3189  }
3190  }
3191  }
3192  }
3193  }
3194  }
3196  int mx= s->b_direct_mv_table[xy][0];
3197  int my= s->b_direct_mv_table[xy][1];
3198 
3199  backup_s.dquant = 0;
3201  s->mb_intra= 0;
3202  ff_mpeg4_set_direct_mv(s, mx, my);
3203  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3204  &dmin, &next_block, mx, my);
3205  }
3207  backup_s.dquant = 0;
3209  s->mb_intra= 0;
3210  ff_mpeg4_set_direct_mv(s, 0, 0);
3211  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3212  &dmin, &next_block, 0, 0);
3213  }
3214  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3215  int coded=0;
3216  for(i=0; i<6; i++)
3217  coded |= s->block_last_index[i];
3218  if(coded){
3219  int mx,my;
3220  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3221  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3222  mx=my=0; //FIXME find the one we actually used
3223  ff_mpeg4_set_direct_mv(s, mx, my);
3224  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3225  mx= s->mv[1][0][0];
3226  my= s->mv[1][0][1];
3227  }else{
3228  mx= s->mv[0][0][0];
3229  my= s->mv[0][0][1];
3230  }
3231 
3232  s->mv_dir= best_s.mv_dir;
3233  s->mv_type = best_s.mv_type;
3234  s->mb_intra= 0;
3235 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3236  s->mv[0][0][1] = best_s.mv[0][0][1];
3237  s->mv[1][0][0] = best_s.mv[1][0][0];
3238  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3239  backup_s.dquant= 0;
3240  s->skipdct=1;
3241  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3242  &dmin, &next_block, mx, my);
3243  s->skipdct=0;
3244  }
3245  }
3246 
3247  s->current_picture.qscale_table[xy] = best_s.qscale;
3248 
3249  copy_context_after_encode(s, &best_s, -1);
3250 
3251  pb_bits_count= put_bits_count(&s->pb);
3252  flush_put_bits(&s->pb);
3253  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3254  s->pb= backup_s.pb;
3255 
3256  if(s->data_partitioning){
3257  pb2_bits_count= put_bits_count(&s->pb2);
3258  flush_put_bits(&s->pb2);
3259  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3260  s->pb2= backup_s.pb2;
3261 
3262  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3263  flush_put_bits(&s->tex_pb);
3264  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3265  s->tex_pb= backup_s.tex_pb;
3266  }
3267  s->last_bits= put_bits_count(&s->pb);
3268 
3269  if (CONFIG_H263_ENCODER &&
3272 
3273  if(next_block==0){ //FIXME 16 vs linesize16
3274  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3275  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3276  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3277  }
3278 
3280  ff_mpv_decode_mb(s, s->block);
3281  } else {
3282  int motion_x = 0, motion_y = 0;
3284  // only one MB-Type possible
3285 
3286  switch(mb_type){
3288  s->mv_dir = 0;
3289  s->mb_intra= 1;
3290  motion_x= s->mv[0][0][0] = 0;
3291  motion_y= s->mv[0][0][1] = 0;
3292  break;
3294  s->mv_dir = MV_DIR_FORWARD;
3295  s->mb_intra= 0;
3296  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3297  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3298  break;
3300  s->mv_dir = MV_DIR_FORWARD;
3301  s->mv_type = MV_TYPE_FIELD;
3302  s->mb_intra= 0;
3303  for(i=0; i<2; i++){
3304  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3305  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3306  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3307  }
3308  break;
3310  s->mv_dir = MV_DIR_FORWARD;
3311  s->mv_type = MV_TYPE_8X8;
3312  s->mb_intra= 0;
3313  for(i=0; i<4; i++){
3314  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3315  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3316  }
3317  break;
3319  if (CONFIG_MPEG4_ENCODER) {
3321  s->mb_intra= 0;
3322  motion_x=s->b_direct_mv_table[xy][0];
3323  motion_y=s->b_direct_mv_table[xy][1];
3324  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3325  }
3326  break;
3328  if (CONFIG_MPEG4_ENCODER) {
3330  s->mb_intra= 0;
3331  ff_mpeg4_set_direct_mv(s, 0, 0);
3332  }
3333  break;
3336  s->mb_intra= 0;
3337  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3338  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3339  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3340  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3341  break;
3343  s->mv_dir = MV_DIR_BACKWARD;
3344  s->mb_intra= 0;
3345  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3346  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3347  break;
3349  s->mv_dir = MV_DIR_FORWARD;
3350  s->mb_intra= 0;
3351  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3352  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3353  break;
3355  s->mv_dir = MV_DIR_FORWARD;
3356  s->mv_type = MV_TYPE_FIELD;
3357  s->mb_intra= 0;
3358  for(i=0; i<2; i++){
3359  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3360  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3361  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3362  }
3363  break;
3365  s->mv_dir = MV_DIR_BACKWARD;
3366  s->mv_type = MV_TYPE_FIELD;
3367  s->mb_intra= 0;
3368  for(i=0; i<2; i++){
3369  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3370  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3371  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3372  }
3373  break;
3376  s->mv_type = MV_TYPE_FIELD;
3377  s->mb_intra= 0;
3378  for(dir=0; dir<2; dir++){
3379  for(i=0; i<2; i++){
3380  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3381  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3382  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3383  }
3384  }
3385  break;
3386  default:
3387  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3388  }
3389 
3390  encode_mb(s, motion_x, motion_y);
3391 
3392  // RAL: Update last macroblock type
3393  s->last_mv_dir = s->mv_dir;
3394 
3395  if (CONFIG_H263_ENCODER &&
3398 
3399  ff_mpv_decode_mb(s, s->block);
3400  }
3401 
3402  /* clean the MV table in IPS frames for direct mode in B frames */
3403  if(s->mb_intra /* && I,P,S_TYPE */){
3404  s->p_mv_table[xy][0]=0;
3405  s->p_mv_table[xy][1]=0;
3406  }
3407 
3408  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3409  int w= 16;
3410  int h= 16;
3411 
3412  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3413  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3414 
3415  s->current_picture.error[0] += sse(
3416  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3417  s->dest[0], w, h, s->linesize);
3418  s->current_picture.error[1] += sse(
3419  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3420  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3421  s->current_picture.error[2] += sse(
3422  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3423  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3424  }
3425  if(s->loop_filter){
3428  }
3429  ff_dlog(s->avctx, "MB %d %d bits\n",
3430  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3431  }
3432  }
3433 
3434  //not beautiful here but we must write it before flushing so it has to be here
3437 
3438  write_slice_end(s);
3439 
3440  /* Send the last GOB if RTP */
3441  if (s->avctx->rtp_callback) {
3442  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3443  pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3444  /* Call the RTP callback to send the last GOB */
3445  emms_c();
3446  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3447  }
3448 
3449  return 0;
3450 }
3451 
/* MERGE(field): accumulate src->field into dst->field and zero src->field.
 * NOTE(review): two statements, not wrapped in do { } while (0); every call
 * site below uses it as a stand-alone statement, so this is safe here, but
 * fragile if ever used inside an unbraced if-body. */
3452 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker context (src)
 * back into the main context (dst).
 * NOTE(review): the function signature line (original line 3453) is missing
 * from this extract — confirm name/parameters against the upstream file. */
3454  MERGE(me.scene_change_score);
3455  MERGE(me.mc_mb_var_sum_temp);
3456  MERGE(me.mb_var_sum_temp);
3457 }
3458 
/* Fold per-slice encoding statistics and the bitstream written by a worker
 * context into the main context.
 * NOTE(review): the signature line (3459) and lines 3473-3476 are missing
 * from this extract — confirm against upstream before relying on this view. */
3460  int i;
3461 
3462  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3463  MERGE(dct_count[1]);
3464  MERGE(mv_bits);
3465  MERGE(i_tex_bits);
3466  MERGE(p_tex_bits);
3467  MERGE(i_count);
3468  MERGE(f_count);
3469  MERGE(b_count);
3470  MERGE(skip_count);
3471  MERGE(misc_bits);
3472  MERGE(er.error_count);
3477 
3478  if(dst->avctx->noise_reduction){
     /* accumulate the DCT noise-reduction error sums (intra and inter) */
3479  for(i=0; i<64; i++){
3480  MERGE(dct_error_sum[0][i]);
3481  MERGE(dct_error_sum[1][i]);
3482  }
3483  }
3484 
     /* slice bitstreams must be byte-aligned before they are concatenated */
3485  assert(put_bits_count(&src->pb) % 8 ==0);
3486  assert(put_bits_count(&dst->pb) % 8 ==0);
3487  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3488  flush_put_bits(&dst->pb);
3489 }
3490 
/* Choose the quantizer (lambda / qscale) for the frame about to be encoded.
 * Returns 0 on success, -1 when rate control produced an invalid quality.
 * When dry_run is set, state that must only change for the real encoding
 * pass (s->next_lambda) is left untouched.
 * NOTE(review): lines 3493-3494, 3497-3498, 3506-3507 and 3513 are missing
 * from this extract; the lambda/quality assignments they contained must be
 * confirmed against the upstream source. */
3491 static int estimate_qp(MpegEncContext *s, int dry_run){
3492  if (s->next_lambda){
      /* a lambda was carried over from the previous frame's rate control */
3495  if(!dry_run) s->next_lambda= 0;
3496  } else if (!s->fixed_qscale) {
      /* rate-control path: a negative quality signals failure */
3499  if (s->current_picture.f->quality < 0)
3500  return -1;
3501  }
3502 
3503  if(s->adaptive_quant){
      /* per-macroblock quantizer adaptation: build the qscale table using
       * codec-specific clamping rules before picking the frame lambda */
3504  switch(s->codec_id){
3505  case AV_CODEC_ID_MPEG4:
3508  break;
3509  case AV_CODEC_ID_H263:
3510  case AV_CODEC_ID_H263P:
3511  case AV_CODEC_ID_FLV1:
3512  if (CONFIG_H263_ENCODER)
3514  break;
3515  default:
3516  ff_init_qscale_tab(s);
3517  }
3518 
3519  s->lambda= s->lambda_table[0];
3520  //FIXME broken
3521  }else
3522  s->lambda = s->current_picture.f->quality;
3523  update_qscale(s);
3524  return 0;
3525 }
3526 
3527 /* must be called before writing the header */
/* Update the temporal distances used for B-frame prediction:
 *  - s->time:    presentation time of the current picture, scaled by
 *                time_base.num
 *  - s->pp_time: distance between the two non-B pictures surrounding a B
 *  - s->pb_time: distance from the previous non-B picture to this B picture
 * NOTE(review): the function signature line (original lines 3528-3529) is
 * missing from this extract — confirm the name against the upstream file. */
3530  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3531 
3532  if(s->pict_type==AV_PICTURE_TYPE_B){
      /* a B picture must lie strictly between its surrounding references */
3533  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3534  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3535  }else{
      /* reference (non-B) picture: advance the last-non-B timestamp */
3536  s->pp_time= s->time - s->last_non_b_time;
3537  s->last_non_b_time= s->time;
3538  assert(s->picture_number==0 || s->pp_time > 0);
3539  }
3540 }
3541 
3543 {
3544  int i, ret;
3545  int bits;
3546  int context_count = s->slice_context_count;
3547 
3549 
3550  /* Reset the average MB variance */
3551  s->me.mb_var_sum_temp =
3552  s->me.mc_mb_var_sum_temp = 0;
3553 
3554  /* we need to initialize some time vars before we can encode b-frames */
3555  // RAL: Condition added for MPEG1VIDEO
3559  ff_set_mpeg4_time(s);
3560 
3561  s->me.scene_change_score=0;
3562 
3563 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3564 
3565  if(s->pict_type==AV_PICTURE_TYPE_I){
3566  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3567  else s->no_rounding=0;
3568  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3570  s->no_rounding ^= 1;
3571  }
3572 
3573  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3574  if (estimate_qp(s,1) < 0)
3575  return -1;
3576  ff_get_2pass_fcode(s);
3577  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3579  s->lambda= s->last_lambda_for[s->pict_type];
3580  else
3582  update_qscale(s);
3583  }
3584 
3590  }
3591 
3592  s->mb_intra=0; //for the rate distortion & bit compare functions
3593  for(i=1; i<context_count; i++){
3595  if (ret < 0)
3596  return ret;
3597  }
3598 
3599  if(ff_init_me(s)<0)
3600  return -1;
3601 
3602  /* Estimate motion for every MB */
3603  if(s->pict_type != AV_PICTURE_TYPE_I){
3604  s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3605  s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3606  if (s->pict_type != AV_PICTURE_TYPE_B) {
3607  if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3608  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3609  }
3610  }
3611 
3612  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3613  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3614  /* I-Frame */
3615  for(i=0; i<s->mb_stride*s->mb_height; i++)
3617 
3618  if(!s->fixed_qscale){
3619  /* finding spatial complexity for I-frame rate control */
3620  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3621  }
3622  }
3623  for(i=1; i<context_count; i++){
3625  }
3627  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3628  emms_c();
3629 
3632  for(i=0; i<s->mb_stride*s->mb_height; i++)
3634  if(s->msmpeg4_version >= 3)
3635  s->no_rounding=1;
3636  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3638  }
3639 
3640  if(!s->umvplus){
3643 
3645  int a,b;
3646  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3648  s->f_code= FFMAX3(s->f_code, a, b);
3649  }
3650 
3651  ff_fix_long_p_mvs(s);
3654  int j;
3655  for(i=0; i<2; i++){
3656  for(j=0; j<2; j++)
3659  }
3660  }
3661  }
3662 
3663  if(s->pict_type==AV_PICTURE_TYPE_B){
3664  int a, b;
3665 
3668  s->f_code = FFMAX(a, b);
3669 
3672  s->b_code = FFMAX(a, b);
3673 
3679  int dir, j;
3680  for(dir=0; dir<2; dir++){
3681  for(i=0; i<2; i++){
3682  for(j=0; j<2; j++){
3685  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3686  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3687  }
3688  }
3689  }
3690  }
3691  }
3692  }
3693 
3694  if (estimate_qp(s, 0) < 0)
3695  return -1;
3696 
3697  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3698  s->pict_type == AV_PICTURE_TYPE_I &&
3699  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3700  s->qscale= 3; //reduce clipping problems
3701 
3702  if (s->out_format == FMT_MJPEG) {
3703  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3704  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3705 
3706  if (s->avctx->intra_matrix) {
3707  chroma_matrix =
3708  luma_matrix = s->avctx->intra_matrix;
3709  }
3710  if (s->avctx->chroma_intra_matrix)
3711  chroma_matrix = s->avctx->chroma_intra_matrix;
3712 
3713  /* for mjpeg, we do include qscale in the matrix */
3714  for(i=1;i<64;i++){
3715  int j = s->idsp.idct_permutation[i];
3716 
3717  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3718  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3719  }
3720  s->y_dc_scale_table=
3722  s->chroma_intra_matrix[0] =
3725  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3727  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3728  s->qscale= 8;
3729  }
3730  if(s->codec_id == AV_CODEC_ID_AMV){
3731  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3732  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3733  for(i=1;i<64;i++){
3734  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3735 
3736  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3737  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3738  }
3739  s->y_dc_scale_table= y;
3740  s->c_dc_scale_table= c;
3741  s->intra_matrix[0] = 13;
3742  s->chroma_intra_matrix[0] = 14;
3744  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3746  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3747  s->qscale= 8;
3748  }
3749 
3750  //FIXME var duplication
3752  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3755 
3756  if (s->current_picture.f->key_frame)
3757  s->picture_in_gop_number=0;
3758 
3759  s->mb_x = s->mb_y = 0;
3760  s->last_bits= put_bits_count(&s->pb);
3761  switch(s->out_format) {
3762  case FMT_MJPEG:
3766  break;
3767  case FMT_H261:
3768  if (CONFIG_H261_ENCODER)
3769  ff_h261_encode_picture_header(s, picture_number);
3770  break;
3771  case FMT_H263:
3773  ff_wmv2_encode_picture_header(s, picture_number);
3774  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3775  ff_msmpeg4_encode_picture_header(s, picture_number);
3776  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3777  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3778  if (ret < 0)
3779  return ret;
3780  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3781  ret = ff_rv10_encode_picture_header(s, picture_number);
3782  if (ret < 0)
3783  return ret;
3784  }
3785  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3786  ff_rv20_encode_picture_header(s, picture_number);
3787  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3788  ff_flv_encode_picture_header(s, picture_number);
3789  else if (CONFIG_H263_ENCODER)
3790  ff_h263_encode_picture_header(s, picture_number);
3791  break;
3792  case FMT_MPEG1:
3794  ff_mpeg1_encode_picture_header(s, picture_number);
3795  break;
3796  default:
3797  av_assert0(0);
3798  }
3799  bits= put_bits_count(&s->pb);
3800  s->header_bits= bits - s->last_bits;
3801 
3802  for(i=1; i<context_count; i++){
3804  }
3805  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3806  for(i=1; i<context_count; i++){
3807  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3808  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3810  }
3811  emms_c();
3812  return 0;
3813 }
3814 
3815 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3816  const int intra= s->mb_intra;
3817  int i;
3818 
3819  s->dct_count[intra]++;
3820 
3821  for(i=0; i<64; i++){
3822  int level= block[i];
3823 
3824  if(level){
3825  if(level>0){
3826  s->dct_error_sum[intra][i] += level;
3827  level -= s->dct_offset[intra][i];
3828  if(level<0) level=0;
3829  }else{
3830  s->dct_error_sum[intra][i] -= level;
3831  level += s->dct_offset[intra][i];
3832  if(level>0) level=0;
3833  }
3834  block[i]= level;
3835  }
3836  }
3837 }
3838 
3840  int16_t *block, int n,
3841  int qscale, int *overflow){
3842  const int *qmat;
3843  const uint16_t *matrix;
3844  const uint8_t *scantable= s->intra_scantable.scantable;
3845  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3846  int max=0;
3847  unsigned int threshold1, threshold2;
3848  int bias=0;
3849  int run_tab[65];
3850  int level_tab[65];
3851  int score_tab[65];
3852  int survivor[65];
3853  int survivor_count;
3854  int last_run=0;
3855  int last_level=0;
3856  int last_score= 0;
3857  int last_i;
3858  int coeff[2][64];
3859  int coeff_count[64];
3860  int qmul, qadd, start_i, last_non_zero, i, dc;
3861  const int esc_length= s->ac_esc_length;
3862  uint8_t * length;
3863  uint8_t * last_length;
3864  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3865 
3866  s->fdsp.fdct(block);
3867 
3868  if(s->dct_error_sum)
3869  s->denoise_dct(s, block);
3870  qmul= qscale*16;
3871  qadd= ((qscale-1)|1)*8;
3872 
3873  if (s->mb_intra) {
3874  int q;
3875  if (!s->h263_aic) {
3876  if (n < 4)
3877  q = s->y_dc_scale;
3878  else
3879  q = s->c_dc_scale;
3880  q = q << 3;
3881  } else{
3882  /* For AIC we skip quant/dequant of INTRADC */
3883  q = 1 << 3;
3884  qadd=0;
3885  }
3886 
3887  /* note: block[0] is assumed to be positive */
3888  block[0] = (block[0] + (q >> 1)) / q;
3889  start_i = 1;
3890  last_non_zero = 0;
3891  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3892  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
3893  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
3894  bias= 1<<(QMAT_SHIFT-1);
3895 
3896  if (n > 3 && s->intra_chroma_ac_vlc_length) {
3897  length = s->intra_chroma_ac_vlc_length;
3898  last_length= s->intra_chroma_ac_vlc_last_length;
3899  } else {
3900  length = s->intra_ac_vlc_length;
3901  last_length= s->intra_ac_vlc_last_length;
3902  }
3903  } else {
3904  start_i = 0;
3905  last_non_zero = -1;
3906  qmat = s->q_inter_matrix[qscale];
3907  matrix = s->inter_matrix;
3908  length = s->inter_ac_vlc_length;
3909  last_length= s->inter_ac_vlc_last_length;
3910  }
3911  last_i= start_i;
3912 
3913  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3914  threshold2= (threshold1<<1);
3915 
3916  for(i=63; i>=start_i; i--) {
3917  const int j = scantable[i];
3918  int level = block[j] * qmat[j];
3919 
3920  if(((unsigned)(level+threshold1))>threshold2){
3921  last_non_zero = i;
3922  break;
3923  }
3924  }
3925 
3926  for(i=start_i; i<=last_non_zero; i++) {
3927  const int j = scantable[i];
3928  int level = block[j] * qmat[j];
3929 
3930 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3931 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3932  if(((unsigned)(level+threshold1))>threshold2){
3933  if(level>0){
3934  level= (bias + level)>>QMAT_SHIFT;
3935  coeff[0][i]= level;
3936  coeff[1][i]= level-1;
3937 // coeff[2][k]= level-2;
3938  }else{
3939  level= (bias - level)>>QMAT_SHIFT;
3940  coeff[0][i]= -level;
3941  coeff[1][i]= -level+1;
3942 // coeff[2][k]= -level+2;
3943  }
3944  coeff_count[i]= FFMIN(level, 2);
3945  av_assert2(coeff_count[i]);
3946  max |=level;
3947  }else{
3948  coeff[0][i]= (level>>31)|1;
3949  coeff_count[i]= 1;
3950  }
3951  }
3952 
3953  *overflow= s->max_qcoeff < max; //overflow might have happened
3954 
3955  if(last_non_zero < start_i){
3956  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3957  return last_non_zero;
3958  }
3959 
3960  score_tab[start_i]= 0;
3961  survivor[0]= start_i;
3962  survivor_count= 1;
3963 
3964  for(i=start_i; i<=last_non_zero; i++){
3965  int level_index, j, zero_distortion;
3966  int dct_coeff= FFABS(block[ scantable[i] ]);
3967  int best_score=256*256*256*120;
3968 
3969  if (s->fdsp.fdct == ff_fdct_ifast)
3970  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3971  zero_distortion= dct_coeff*dct_coeff;
3972 
3973  for(level_index=0; level_index < coeff_count[i]; level_index++){
3974  int distortion;
3975  int level= coeff[level_index][i];
3976  const int alevel= FFABS(level);
3977  int unquant_coeff;
3978 
3979  av_assert2(level);
3980 
3981  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
3982  unquant_coeff= alevel*qmul + qadd;
3983  } else if(s->out_format == FMT_MJPEG) {
3984  j = s->idsp.idct_permutation[scantable[i]];
3985  unquant_coeff = alevel * matrix[j] * 8;
3986  }else{ //MPEG1
3987  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
3988  if(s->mb_intra){
3989  unquant_coeff = (int)( alevel * qscale * matrix[j]) >> 3;
3990  unquant_coeff = (unquant_coeff - 1) | 1;
3991  }else{
3992  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[j])) >> 4;
3993  unquant_coeff = (unquant_coeff - 1) | 1;
3994  }
3995  unquant_coeff<<= 3;
3996  }
3997 
3998  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3999  level+=64;
4000  if((level&(~127)) == 0){
4001  for(j=survivor_count-1; j>=0; j--){
4002  int run= i - survivor[j];
4003  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4004  score += score_tab[i-run];
4005 
4006  if(score < best_score){
4007  best_score= score;
4008  run_tab[i+1]= run;
4009  level_tab[i+1]= level-64;
4010  }
4011  }
4012 
4013  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4014  for(j=survivor_count-1; j>=0; j--){
4015  int run= i - survivor[j];
4016  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4017  score += score_tab[i-run];
4018  if(score < last_score){
4019  last_score= score;
4020  last_run= run;
4021  last_level= level-64;
4022  last_i= i+1;
4023  }
4024  }
4025  }
4026  }else{
4027  distortion += esc_length*lambda;
4028  for(j=survivor_count-1; j>=0; j--){
4029  int run= i - survivor[j];
4030  int score= distortion + score_tab[i-run];
4031 
4032  if(score < best_score){
4033  best_score= score;
4034  run_tab[i+1]= run;
4035  level_tab[i+1]= level-64;
4036  }
4037  }
4038 
4039  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4040  for(j=survivor_count-1; j>=0; j--){
4041  int run= i - survivor[j];
4042  int score= distortion + score_tab[i-run];
4043  if(score < last_score){
4044  last_score= score;
4045  last_run= run;
4046  last_level= level-64;
4047  last_i= i+1;
4048  }
4049  }
4050  }
4051  }
4052  }
4053 
4054  score_tab[i+1]= best_score;
4055 
4056  //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
4057  if(last_non_zero <= 27){
4058  for(; survivor_count; survivor_count--){
4059  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4060  break;
4061  }
4062  }else{
4063  for(; survivor_count; survivor_count--){
4064  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4065  break;
4066  }
4067  }
4068 
4069  survivor[ survivor_count++ ]= i+1;
4070  }
4071 
4072  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4073  last_score= 256*256*256*120;
4074  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4075  int score= score_tab[i];
4076  if(i) score += lambda*2; //FIXME exacter?
4077 
4078  if(score < last_score){
4079  last_score= score;
4080  last_i= i;
4081  last_level= level_tab[i];
4082  last_run= run_tab[i];
4083  }
4084  }
4085  }
4086 
4087  s->coded_score[n] = last_score;
4088 
4089  dc= FFABS(block[0]);
4090  last_non_zero= last_i - 1;
4091  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4092 
4093  if(last_non_zero < start_i)
4094  return last_non_zero;
4095 
4096  if(last_non_zero == 0 && start_i == 0){
4097  int best_level= 0;
4098  int best_score= dc * dc;
4099 
4100  for(i=0; i<coeff_count[0]; i++){
4101  int level= coeff[i][0];
4102  int alevel= FFABS(level);
4103  int unquant_coeff, score, distortion;
4104 
4105  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4106  unquant_coeff= (alevel*qmul + qadd)>>3;
4107  }else{ //MPEG1
4108  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[0])) >> 4;
4109  unquant_coeff = (unquant_coeff - 1) | 1;
4110  }
4111  unquant_coeff = (unquant_coeff + 4) >> 3;
4112  unquant_coeff<<= 3 + 3;
4113 
4114  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4115  level+=64;
4116  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4117  else score= distortion + esc_length*lambda;
4118 
4119  if(score < best_score){
4120  best_score= score;
4121  best_level= level - 64;
4122  }
4123  }
4124  block[0]= best_level;
4125  s->coded_score[n] = best_score - dc*dc;
4126  if(best_level == 0) return -1;
4127  else return last_non_zero;
4128  }
4129 
4130  i= last_i;
4131  av_assert2(last_level);
4132 
4133  block[ perm_scantable[last_non_zero] ]= last_level;
4134  i -= last_run + 1;
4135 
4136  for(; i>start_i; i -= run_tab[i] + 1){
4137  block[ perm_scantable[i-1] ]= level_tab[i];
4138  }
4139 
4140  return last_non_zero;
4141 }
4142 
4143 //#define REFINE_STATS 1
4144 static int16_t basis[64][64];
4145 
4146 static void build_basis(uint8_t *perm){
4147  int i, j, x, y;
4148  emms_c();
4149  for(i=0; i<8; i++){
4150  for(j=0; j<8; j++){
4151  for(y=0; y<8; y++){
4152  for(x=0; x<8; x++){
4153  double s= 0.25*(1<<BASIS_SHIFT);
4154  int index= 8*i + j;
4155  int perm_index= perm[index];
4156  if(i==0) s*= sqrt(0.5);
4157  if(j==0) s*= sqrt(0.5);
4158  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4159  }
4160  }
4161  }
4162  }
4163 }
4164 
4165 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4166  int16_t *block, int16_t *weight, int16_t *orig,
4167  int n, int qscale){
4168  int16_t rem[64];
4169  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4170  const uint8_t *scantable= s->intra_scantable.scantable;
4171  const uint8_t *perm_scantable= s->intra_scantable.permutated;
4172 // unsigned int threshold1, threshold2;
4173 // int bias=0;
4174  int run_tab[65];
4175  int prev_run=0;
4176  int prev_level=0;
4177  int qmul, qadd, start_i, last_non_zero, i, dc;
4178  uint8_t * length;
4179  uint8_t * last_length;
4180  int lambda;
4181  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4182 #ifdef REFINE_STATS
4183 static int count=0;
4184 static int after_last=0;
4185 static int to_zero=0;
4186 static int from_zero=0;
4187 static int raise=0;
4188 static int lower=0;
4189 static int messed_sign=0;
4190 #endif
4191 
4192  if(basis[0][0] == 0)
4194 
4195  qmul= qscale*2;
4196  qadd= (qscale-1)|1;
4197  if (s->mb_intra) {
4198  if (!s->h263_aic) {
4199  if (n < 4)
4200  q = s->y_dc_scale;
4201  else
4202  q = s->c_dc_scale;
4203  } else{
4204  /* For AIC we skip quant/dequant of INTRADC */
4205  q = 1;
4206  qadd=0;
4207  }
4208  q <<= RECON_SHIFT-3;
4209  /* note: block[0] is assumed to be positive */
4210  dc= block[0]*q;
4211 // block[0] = (block[0] + (q >> 1)) / q;
4212  start_i = 1;
4213 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4214 // bias= 1<<(QMAT_SHIFT-1);
4215  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4216  length = s->intra_chroma_ac_vlc_length;
4217  last_length= s->intra_chroma_ac_vlc_last_length;
4218  } else {
4219  length = s->intra_ac_vlc_length;
4220  last_length= s->intra_ac_vlc_last_length;
4221  }
4222  } else {
4223  dc= 0;
4224  start_i = 0;
4225  length = s->inter_ac_vlc_length;
4226  last_length= s->inter_ac_vlc_last_length;
4227  }
4228  last_non_zero = s->block_last_index[n];
4229 
4230 #ifdef REFINE_STATS
4231 {START_TIMER
4232 #endif
4233  dc += (1<<(RECON_SHIFT-1));
4234  for(i=0; i<64; i++){
4235  rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
4236  }
4237 #ifdef REFINE_STATS
4238 STOP_TIMER("memset rem[]")}
4239 #endif
4240  sum=0;
4241  for(i=0; i<64; i++){
4242  int one= 36;
4243  int qns=4;
4244  int w;
4245 
4246  w= FFABS(weight[i]) + qns*one;
4247  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4248 
4249  weight[i] = w;
4250 // w=weight[i] = (63*qns + (w/2)) / w;
4251 
4252  av_assert2(w>0);
4253  av_assert2(w<(1<<6));
4254  sum += w*w;
4255  }
4256  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4257 #ifdef REFINE_STATS
4258 {START_TIMER
4259 #endif
4260  run=0;
4261  rle_index=0;
4262  for(i=start_i; i<=last_non_zero; i++){
4263  int j= perm_scantable[i];
4264  const int level= block[j];
4265  int coeff;
4266 
4267  if(level){
4268  if(level<0) coeff= qmul*level - qadd;
4269  else coeff= qmul*level + qadd;
4270  run_tab[rle_index++]=run;
4271  run=0;
4272 
4273  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4274  }else{
4275  run++;
4276  }
4277  }
4278 #ifdef REFINE_STATS
4279 if(last_non_zero>0){
4280 STOP_TIMER("init rem[]")
4281 }
4282 }
4283 
4284 {START_TIMER
4285 #endif
4286  for(;;){
4287  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4288  int best_coeff=0;
4289  int best_change=0;
4290  int run2, best_unquant_change=0, analyze_gradient;
4291 #ifdef REFINE_STATS
4292 {START_TIMER
4293 #endif
4294  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4295 
4296  if(analyze_gradient){
4297 #ifdef REFINE_STATS
4298 {START_TIMER
4299 #endif
4300  for(i=0; i<64; i++){
4301  int w= weight[i];
4302 
4303  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4304  }
4305 #ifdef REFINE_STATS
4306 STOP_TIMER("rem*w*w")}
4307 {START_TIMER
4308 #endif
4309  s->fdsp.fdct(d1);
4310 #ifdef REFINE_STATS
4311 STOP_TIMER("dct")}
4312 #endif
4313  }
4314 
4315  if(start_i){
4316  const int level= block[0];
4317  int change, old_coeff;
4318 
4319  av_assert2(s->mb_intra);
4320 
4321  old_coeff= q*level;
4322 
4323  for(change=-1; change<=1; change+=2){
4324  int new_level= level + change;
4325  int score, new_coeff;
4326 
4327  new_coeff= q*new_level;
4328  if(new_coeff >= 2048 || new_coeff < 0)
4329  continue;
4330 
4331  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4332  new_coeff - old_coeff);
4333  if(score<best_score){
4334  best_score= score;
4335  best_coeff= 0;
4336  best_change= change;
4337  best_unquant_change= new_coeff - old_coeff;
4338  }
4339  }
4340  }
4341 
4342  run=0;
4343  rle_index=0;
4344  run2= run_tab[rle_index++];
4345  prev_level=0;
4346  prev_run=0;
4347 
4348  for(i=start_i; i<64; i++){
4349  int j= perm_scantable[i];
4350  const int level= block[j];
4351  int change, old_coeff;
4352 
4353  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4354  break;
4355 
4356  if(level){
4357  if(level<0) old_coeff= qmul*level - qadd;
4358  else old_coeff= qmul*level + qadd;
4359  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4360  }else{
4361  old_coeff=0;
4362  run2--;
4363  av_assert2(run2>=0 || i >= last_non_zero );
4364  }
4365 
4366  for(change=-1; change<=1; change+=2){
4367  int new_level= level + change;
4368  int score, new_coeff, unquant_change;
4369 
4370  score=0;
4371  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4372  continue;
4373 
4374  if(new_level){
4375  if(new_level<0) new_coeff= qmul*new_level - qadd;
4376  else new_coeff= qmul*new_level + qadd;
4377  if(new_coeff >= 2048 || new_coeff <= -2048)
4378  continue;
4379  //FIXME check for overflow
4380 
4381  if(level){
4382  if(level < 63 && level > -63){
4383  if(i < last_non_zero)
4384  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4385  - length[UNI_AC_ENC_INDEX(run, level+64)];
4386  else
4387  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4388  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4389  }
4390  }else{
4391  av_assert2(FFABS(new_level)==1);
4392 
4393  if(analyze_gradient){
4394  int g= d1[ scantable[i] ];
4395  if(g && (g^new_level) >= 0)
4396  continue;
4397  }
4398 
4399  if(i < last_non_zero){
4400  int next_i= i + run2 + 1;
4401  int next_level= block[ perm_scantable[next_i] ] + 64;
4402 
4403  if(next_level&(~127))
4404  next_level= 0;
4405 
4406  if(next_i < last_non_zero)
4407  score += length[UNI_AC_ENC_INDEX(run, 65)]
4408  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4409  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4410  else
4411  score += length[UNI_AC_ENC_INDEX(run, 65)]
4412  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4413  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4414  }else{
4415  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4416  if(prev_level){
4417  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4418  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4419  }
4420  }
4421  }
4422  }else{
4423  new_coeff=0;
4424  av_assert2(FFABS(level)==1);
4425 
4426  if(i < last_non_zero){
4427  int next_i= i + run2 + 1;
4428  int next_level= block[ perm_scantable[next_i] ] + 64;
4429 
4430  if(next_level&(~127))
4431  next_level= 0;
4432 
4433  if(next_i < last_non_zero)
4434  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4435  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4436  - length[UNI_AC_ENC_INDEX(run, 65)];
4437  else
4438  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4439  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4440  - length[UNI_AC_ENC_INDEX(run, 65)];
4441  }else{
4442  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4443  if(prev_level){
4444  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4445  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4446  }
4447  }
4448  }
4449 
4450  score *= lambda;
4451 
4452  unquant_change= new_coeff - old_coeff;
4453  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4454 
4455  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4456  unquant_change);
4457  if(score<best_score){
4458  best_score= score;
4459  best_coeff= i;
4460  best_change= change;
4461  best_unquant_change= unquant_change;
4462  }
4463  }
4464  if(level){
4465  prev_level= level + 64;
4466  if(prev_level&(~127))
4467  prev_level= 0;
4468  prev_run= run;
4469  run=0;
4470  }else{
4471  run++;
4472  }
4473  }
4474 #ifdef REFINE_STATS
4475 STOP_TIMER("iterative step")}
4476 #endif
4477 
4478  if(best_change){
4479  int j= perm_scantable[ best_coeff ];
4480 
4481  block[j] += best_change;
4482 
4483  if(best_coeff > last_non_zero){
4484  last_non_zero= best_coeff;
4485  av_assert2(block[j]);
4486 #ifdef REFINE_STATS
4487 after_last++;
4488 #endif
4489  }else{
4490 #ifdef REFINE_STATS
4491 if(block[j]){
4492  if(block[j] - best_change){
4493  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4494  raise++;
4495  }else{
4496  lower++;
4497  }
4498  }else{
4499  from_zero++;
4500  }
4501 }else{
4502  to_zero++;
4503 }
4504 #endif
4505  for(; last_non_zero>=start_i; last_non_zero--){
4506  if(block[perm_scantable[last_non_zero]])
4507  break;
4508  }
4509  }
4510 #ifdef REFINE_STATS
4511 count++;
4512 if(256*256*256*64 % count == 0){
4513  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4514 }
4515 #endif
4516  run=0;
4517  rle_index=0;
4518  for(i=start_i; i<=last_non_zero; i++){
4519  int j= perm_scantable[i];
4520  const int level= block[j];
4521 
4522  if(level){
4523  run_tab[rle_index++]=run;
4524  run=0;
4525  }else{
4526  run++;
4527  }
4528  }
4529 
4530  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4531  }else{
4532  break;
4533  }
4534  }
4535 #ifdef REFINE_STATS
4536 if(last_non_zero>0){
4537 STOP_TIMER("iterative search")
4538 }
4539 }
4540 #endif
4541 
4542  return last_non_zero;
4543 }
4544 
/**
 * Permute an 8x8 block of coefficients.
 * @param block       the block, permuted in place
 * @param permutation the permutation vector to apply
 * @param scantable   scantable used only to limit the work to the coded
 *                    coefficients; the block is NOT converted to scantable
 *                    order
 * @param last        index (in scantable order) of the last nonzero
 *                    coefficient; used to speed the permutation up
 */
static void block_permute(int16_t *block, uint8_t *permutation,
                          const uint8_t *scantable, int last)
{
    int16_t tmp[64];
    int i;

    if (last <= 0)
        return;
    // FIXME: it is OK but not clean, and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* Gather the coded coefficients, clearing their original slots. */
    for (i = 0; i <= last; i++) {
        const int idx = scantable[i];
        tmp[idx]   = block[idx];
        block[idx] = 0;
    }

    /* Scatter them back through the permutation vector. */
    for (i = 0; i <= last; i++) {
        const int idx = scantable[i];
        block[permutation[idx]] = tmp[idx];
    }
}
4580 
4582  int16_t *block, int n,
4583  int qscale, int *overflow)
4584 {
4585  int i, j, level, last_non_zero, q, start_i;
4586  const int *qmat;
4587  const uint8_t *scantable= s->intra_scantable.scantable;
4588  int bias;
4589  int max=0;
4590  unsigned int threshold1, threshold2;
4591 
4592  s->fdsp.fdct(block);
4593 
4594  if(s->dct_error_sum)
4595  s->denoise_dct(s, block);
4596 
4597  if (s->mb_intra) {
4598  if (!s->h263_aic) {
4599  if (n < 4)
4600  q = s->y_dc_scale;
4601  else
4602  q = s->c_dc_scale;
4603  q = q << 3;
4604  } else
4605  /* For AIC we skip quant/dequant of INTRADC */
4606  q = 1 << 3;
4607 
4608  /* note: block[0] is assumed to be positive */
4609  block[0] = (block[0] + (q >> 1)) / q;
4610  start_i = 1;
4611  last_non_zero = 0;
4612  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4613  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4614  } else {
4615  start_i = 0;
4616  last_non_zero = -1;
4617  qmat = s->q_inter_matrix[qscale];
4618  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4619  }
4620  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4621  threshold2= (threshold1<<1);
4622  for(i=63;i>=start_i;i--) {
4623  j = scantable[i];
4624  level = block[j] * qmat[j];
4625 
4626  if(((unsigned)(level+threshold1))>threshold2){
4627  last_non_zero = i;
4628  break;
4629  }else{
4630  block[j]=0;
4631  }
4632  }
4633  for(i=start_i; i<=last_non_zero; i++) {
4634  j = scantable[i];
4635  level = block[j] * qmat[j];
4636 
4637 // if( bias+level >= (1<<QMAT_SHIFT)
4638 // || bias-level >= (1<<QMAT_SHIFT)){
4639  if(((unsigned)(level+threshold1))>threshold2){
4640  if(level>0){
4641  level= (bias + level)>>QMAT_SHIFT;
4642  block[j]= level;
4643  }else{
4644  level= (bias - level)>>QMAT_SHIFT;
4645  block[j]= -level;
4646  }
4647  max |=level;
4648  }else{
4649  block[j]=0;
4650  }
4651  }
4652  *overflow= s->max_qcoeff < max; //overflow might have happened
4653 
4654  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4655  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4657  scantable, last_non_zero);
4658 
4659  return last_non_zero;
4660 }
4661 
4662 #define OFFSET(x) offsetof(MpegEncContext, x)
4663 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4664 static const AVOption h263_options[] = {
4665  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4666  { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4667  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4669  { NULL },
4670 };
4671 
4672 static const AVClass h263_class = {
4673  .class_name = "H.263 encoder",
4674  .item_name = av_default_item_name,
4675  .option = h263_options,
4676  .version = LIBAVUTIL_VERSION_INT,
4677 };
4678 
4680  .name = "h263",
4681  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4682  .type = AVMEDIA_TYPE_VIDEO,
4683  .id = AV_CODEC_ID_H263,
4684  .priv_data_size = sizeof(MpegEncContext),
4686  .encode2 = ff_mpv_encode_picture,
4687  .close = ff_mpv_encode_end,
4689  .priv_class = &h263_class,
4690 };
4691 
4692 static const AVOption h263p_options[] = {
4693  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4694  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4695  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4696  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4698  { NULL },
4699 };
4700 static const AVClass h263p_class = {
4701  .class_name = "H.263p encoder",
4702  .item_name = av_default_item_name,
4703  .option = h263p_options,
4704  .version = LIBAVUTIL_VERSION_INT,
4705 };
4706 
4708  .name = "h263p",
4709  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4710  .type = AVMEDIA_TYPE_VIDEO,
4711  .id = AV_CODEC_ID_H263P,
4712  .priv_data_size = sizeof(MpegEncContext),
4714  .encode2 = ff_mpv_encode_picture,
4715  .close = ff_mpv_encode_end,
4716  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4718  .priv_class = &h263p_class,
4719 };
4720 
4721 static const AVClass msmpeg4v2_class = {
4722  .class_name = "msmpeg4v2 encoder",
4723  .item_name = av_default_item_name,
4724  .option = ff_mpv_generic_options,
4725  .version = LIBAVUTIL_VERSION_INT,
4726 };
4727 
4729  .name = "msmpeg4v2",
4730  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4731  .type = AVMEDIA_TYPE_VIDEO,
4732  .id = AV_CODEC_ID_MSMPEG4V2,
4733  .priv_data_size = sizeof(MpegEncContext),
4735  .encode2 = ff_mpv_encode_picture,
4736  .close = ff_mpv_encode_end,
4738  .priv_class = &msmpeg4v2_class,
4739 };
4740 
4741 static const AVClass msmpeg4v3_class = {
4742  .class_name = "msmpeg4v3 encoder",
4743  .item_name = av_default_item_name,
4744  .option = ff_mpv_generic_options,
4745  .version = LIBAVUTIL_VERSION_INT,
4746 };
4747 
4749  .name = "msmpeg4",
4750  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4751  .type = AVMEDIA_TYPE_VIDEO,
4752  .id = AV_CODEC_ID_MSMPEG4V3,
4753  .priv_data_size = sizeof(MpegEncContext),
4755  .encode2 = ff_mpv_encode_picture,
4756  .close = ff_mpv_encode_end,
4758  .priv_class = &msmpeg4v3_class,
4759 };
4760 
4761 static const AVClass wmv1_class = {
4762  .class_name = "wmv1 encoder",
4763  .item_name = av_default_item_name,
4764  .option = ff_mpv_generic_options,
4765  .version = LIBAVUTIL_VERSION_INT,
4766 };
4767 
4769  .name = "wmv1",
4770  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4771  .type = AVMEDIA_TYPE_VIDEO,
4772  .id = AV_CODEC_ID_WMV1,
4773  .priv_data_size = sizeof(MpegEncContext),
4775  .encode2 = ff_mpv_encode_picture,
4776  .close = ff_mpv_encode_end,
4778  .priv_class = &wmv1_class,
4779 };
int last_time_base
Definition: mpegvideo.h:393
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:797
int plane
Definition: avisynth_c.h:291
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:936
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
int chroma_elim_threshold
Definition: mpegvideo.h:124
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:345
IDCTDSPContext idsp
Definition: mpegvideo.h:237
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:348
const struct AVCodec * codec
Definition: avcodec.h:1521
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:312
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:563
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2727
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1509
float v
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:134
const char * s
Definition: avisynth_c.h:631
#define RECON_SHIFT
attribute_deprecated int intra_quant_bias
Definition: avcodec.h:2030
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:103
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG4.
Definition: avutil.h:269
#define CONFIG_WMV2_ENCODER
Definition: config.h:1354
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:522
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1006
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:284
int esc3_level_length
Definition: mpegvideo.h:443
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2129
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
< number of bits to represent the fractional part of time (encoder only)
Definition: mpegvideo.h:392
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:104
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:3019
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
Definition: mpegvideo.h:255
int mpeg_quant
0-> h263 quant 1-> mpeg quant
Definition: avcodec.h:1829
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
AVOption.
Definition: opt.h:255
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:618
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:287
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:160
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:277
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:904
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:195
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegpicture.h:74
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2951
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:776
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG1 & B-frame MPEG4
Definition: mpegvideo.h:286
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int pre_pass
= 1 for the pre pass
Definition: motion_est.h:71
#define CONFIG_RV10_ENCODER
Definition: config.h:1339
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:759
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:562
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:558
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:72
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:205
#define FF_CMP_NSSE
Definition: avcodec.h:1948
attribute_deprecated int rc_qmod_freq
Definition: avcodec.h:2583
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:110
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
static void block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permuatation.
else temp
Definition: vf_mcdeint.c:257
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:441
const char * g
Definition: vf_curves.c:108
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:762
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:161
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegpicture.h:65
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:335
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:309
static int estimate_qp(MpegEncContext *s, int dry_run)
#define MAX_MV
Definition: motion_est.h:35
int acc
Definition: yuv2rgb.c:533
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1790
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:201
MJPEG encoder.
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:139
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:573
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2247
#define me
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:441
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:108
int num
numerator
Definition: rational.h:44
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:2750
int size
Definition: avcodec.h:1434
attribute_deprecated int lmax
Definition: avcodec.h:2687
enum AVCodecID codec_id
Definition: mpegvideo.h:119
const char * b
Definition: vf_curves.c:109
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:742
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1317
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:373
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:123
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1912
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1732
#define FF_MPV_FLAG_NAQ
Definition: mpegvideo.h:566
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
Definition: mpegvideo.h:261
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:315
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:126
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:1937
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int coded_score[12]
Definition: mpegvideo.h:327
mpegvideo header.
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
int rtp_payload_size
Definition: avcodec.h:2752
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:71
int scene_change_score
Definition: motion_est.h:86
int mpv_flags
flags set by private options
Definition: mpegvideo.h:528
uint8_t permutated[64]
Definition: idctdsp.h:31
static const AVClass h263_class
uint8_t run
Definition: svq3.c:149
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3013
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:318
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
Definition: mpegvideo.h:416
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:358
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:325
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:140
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
int frame_skip_cmp
frame skip comparison function
Definition: avcodec.h:2716
#define FF_LAMBDA_SHIFT
Definition: avutil.h:217
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:242
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: avcodec.h:1264
AVCodec.
Definition: avcodec.h:3482
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:394
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:92
int qscale
QP.
Definition: mpegvideo.h:211
int h263_aic
Advanded INTRA Coding (AIC)
Definition: mpegvideo.h:94
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
Definition: mpegvideo.h:257
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:310
int chroma_x_shift
Definition: mpegvideo.h:479
#define INPLACE_OFFSET
Definition: mpegvideo.h:73
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:121
uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:3272
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:516
int field_select[2][2]
Definition: mpegvideo.h:285
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:520
attribute_deprecated int me_method
This option does nothing.
Definition: avcodec.h:1739
int scenechange_threshold
scene change detection threshold 0 is default, larger means fewer detected scene changes.
Definition: avcodec.h:2089
uint32_t ff_square_tab[512]
Definition: me_cmp.c:32
#define CONFIG_RV20_ENCODER
Definition: config.h:1340
#define FFALIGN(x, a)
Definition: common.h:97
int quant_precision
Definition: mpegvideo.h:405
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2419
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:518
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1641
int modified_quant
Definition: mpegvideo.h:386
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:565
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:227
float rc_buffer_aggressivity
Definition: mpegvideo.h:539
int b_frame_score
Definition: mpegpicture.h:84
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:101
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:1873
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:135
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:385
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:411
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:495
int64_t time
time of current frame
Definition: mpegvideo.h:395
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1585
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (mpeg4) ...
Definition: mpegvideo.h:272
ScratchpadContext sc
Definition: mpegvideo.h:209
uint8_t bits
Definition: crc.c:295
attribute_deprecated const char * rc_eq
Definition: avcodec.h:2606
attribute_deprecated float rc_buffer_aggressivity
Definition: avcodec.h:2628
uint8_t
#define av_cold
Definition: attributes.h:74
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:144
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:102
void(* get_pixels)(int16_t *block, const uint8_t *pixels, ptrdiff_t line_size)
Definition: pixblockdsp.h:27
AVOptions.
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:526
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:414
enum OutputFormat out_format
output format
Definition: mpegvideo.h:111
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:111
#define CONFIG_FAANDCT
Definition: config.h:548
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:341
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:200
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
uint16_t * chroma_intra_matrix
custom intra quantization matrix Code outside libavcodec should access this field using av_codec_g/se...
Definition: avcodec.h:3419
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Multithreading support functions.
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:1988
AVCodec ff_h263_encoder
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:780
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:452
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:366
#define emms_c()
Definition: internal.h:53
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:257
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:359
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1627
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
Definition: mpegvideo.h:292
H.263 tables.
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:112
int interlaced_dct
Definition: mpegvideo.h:484
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:331
int me_cmp
motion estimation comparison function
Definition: avcodec.h:1919
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2708
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:68
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:187
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:2182
#define CHROMA_420
Definition: mpegvideo.h:476
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:438
int intra_dc_precision
Definition: mpegvideo.h:465
int repeat_first_field
Definition: mpegvideo.h:473
static AVFrame * frame
quarterpel DSP functions
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1318
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:258
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: avcodec.h:1433
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:80
#define ff_dlog(a,...)
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:397
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
#define CODEC_FLAG_MV0
Definition: avcodec.h:968
const uint8_t * scantable
Definition: idctdsp.h:30
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:321
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:136
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:71
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:1852
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2781
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:316
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:763
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:564
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:997
#define MAX_LEVEL
Definition: rl.h:35
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:217
int flipflop_rounding
Definition: mpegvideo.h:440
#define CHROMA_444
Definition: mpegvideo.h:478
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & mpeg1 specific
Definition: mpegvideo.h:454
uint8_t * mb_info_ptr
Definition: mpegvideo.h:376
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:681
#define ff_sqrt
Definition: mathops.h:214
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2790
#define ROUNDED_DIV(a, b)
Definition: common.h:55
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:332
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1479
#define FF_CMP_VSSE
Definition: avcodec.h:1947
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:695
#define EDGE_WIDTH
Definition: mpegpicture.h:33
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:330
#define FF_MPV_FLAG_MV0
Definition: mpegvideo.h:567
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:109
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2924
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:194
enum AVCodecID id
Definition: avcodec.h:3496
int h263_plus
h263 plus headers
Definition: mpegvideo.h:116
H263DSPContext h263dsp
Definition: mpegvideo.h:244
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:163
#define MAX_DMV
Definition: motion_est.h:37
int last_non_b_pict_type
used for mpeg4 gmc b-frames & ratecontrol
Definition: mpegvideo.h:222
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:227
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1822
int last_dc[3]
last DC values for MPEG1
Definition: mpegvideo.h:192
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
attribute_deprecated float rc_initial_cplx
Definition: avcodec.h:2631
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:323
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:2066
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:739
int64_t total_bits
Definition: mpegvideo.h:344
#define PTRDIFF_SPECIFIER
Definition: internal.h:252
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:202
#define ARCH_X86
Definition: config.h:38
int chroma_y_shift
Definition: mpegvideo.h:480
int strict_std_compliance
strictly follow the std (MPEG4, ...)
Definition: mpegvideo.h:125
av_default_item_name
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:410
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
int frame_skip_threshold
frame skip threshold
Definition: avcodec.h:2695
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:88
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:1925
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
int qmax
maximum quantizer
Definition: avcodec.h:2564
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2157
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:46
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:230
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:178
ERContext er
Definition: mpegvideo.h:553
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:226
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegpicture.h:87
const char * r
Definition: vf_curves.c:107
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:514
PixblockDSPContext pdsp
Definition: mpegvideo.h:241
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:320
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:508
int h263_slice_structured
Definition: mpegvideo.h:384
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1607
uint8_t * buf
Definition: put_bits.h:38
uint64_t error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2614
int64_t av_gcd(int64_t a, int64_t b)
Return the greatest common divisor of a and b.
Definition: mathematics.c:55
GLsizei GLsizei * length
Definition: opengl_enc.c:115
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:240
attribute_deprecated int inter_quant_bias
Definition: avcodec.h:2036
const char * name
Name of the codec implementation.
Definition: avcodec.h:3489
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:406
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:299
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:518
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:411
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:264
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
GLsizei count
Definition: opengl_enc.c:109
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1105
#define FFMAX(a, b)
Definition: common.h:90
Libavcodec external API header.
#define fail()
Definition: checkasm.h:57
int64_t mb_var_sum_temp
Definition: motion_est.h:85
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1439
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:71
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:363
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2591
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in h263 (limit difference to -2..2)
Definition: ituh263enc.c:267
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:133
int * lambda_table
Definition: mpegvideo.h:215
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:2117
int me_penalty_compensation
Definition: avcodec.h:2160
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:319
static const uint8_t non_linear_qscale[32]
Definition: mpeg12dec.c:99
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:75
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2835
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:136
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
#define CHROMA_422
Definition: mpegvideo.h:477
int bit_rate
the average bitrate
Definition: avcodec.h:1577
float border_masking
Definition: mpegvideo.h:540
int progressive_frame
Definition: mpegvideo.h:482
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:735
#define FFMIN(a, b)
Definition: common.h:92
int display_picture_number
picture number in display order
Definition: frame.h:278
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:336
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:456
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:120
float y
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in mpeg4
#define MAX_MB_BYTES
Definition: mpegvideo.h:71
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:79
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:793
int me_method
ME algorithm.
Definition: mpegvideo.h:266
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
int umvplus
== H263+ && unrestricted_mv
Definition: mpegvideo.h:382
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:181
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:313
int width
picture width / height.
Definition: avcodec.h:1691
int(* pix_sum)(uint8_t *pix, int line_size)
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:191
Picture.
Definition: mpegpicture.h:45
int alternate_scan
Definition: mpegvideo.h:471
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:2639
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:767
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:753
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:751
int b_frame_strategy
Definition: avcodec.h:1807
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:334
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:68
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:283
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:446
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:93
MotionEstContext me
Definition: mpegvideo.h:290
int n
Definition: avisynth_c.h:547
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:94
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:2064
#define CONFIG_FLV_ENCODER
Definition: config.h:1307
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
attribute_deprecated float rc_qsquish
Definition: avcodec.h:2578
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:205
#define MAX_B_FRAMES
Definition: mpegvideo.h:67
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:317
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:261
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3043
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:301
#define av_log2
Definition: intmath.h:100
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:544
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:305
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:80
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:924
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
AVCodec ff_h263p_encoder
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:274
int frame_skip_factor
frame skip factor
Definition: avcodec.h:2702
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:439
int frame_pred_frame_dct
Definition: mpegvideo.h:466
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegpicture.h:68
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:271
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2834
int coded_picture_number
picture number in bitstream order
Definition: frame.h:274
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
uint16_t inter_matrix[64]
Definition: mpegvideo.h:310
#define FF_LAMBDA_SCALE
Definition: avutil.h:218
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: frame.h:351
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:396
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:71
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:162
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
AVS_Value src
Definition: avisynth_c.h:482
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:214
#define CODEC_FLAG_NORMALIZE_AQP
Definition: avcodec.h:995
void ff_faandct(int16_t *data)
Definition: faandct.c:123
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:43
int h263_flv
use flv h263 header
Definition: mpegvideo.h:117
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:141
enum AVCodecID codec_id
Definition: avcodec.h:1529
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:70
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:267
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:87
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:370
#define START_TIMER
Definition: timer.h:92
int frame_bits
number of bits used for the previously encoded frame
Definition: avcodec.h:2774
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:321
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
main external API structure.
Definition: avcodec.h:1512
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:228
ScanTable intra_scantable
Definition: mpegvideo.h:98
int pre_me
prepass for motion estimation
Definition: avcodec.h:1974
int qmin
minimum quantizer
Definition: avcodec.h:2557
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:107
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:151
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:147
FDCTDSPContext fdsp
Definition: mpegvideo.h:234
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:1866
float rc_qmod_amp
Definition: mpegvideo.h:536
int luma_elim_threshold
Definition: mpegvideo.h:123
GLint GLenum type
Definition: opengl_enc.c:105
void ff_fix_long_p_mvs(MpegEncContext *s)
Definition: motion_est.c:1670
Picture * picture
main picture buffer
Definition: mpegvideo.h:143
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:409
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:322
int progressive_sequence
Definition: mpegvideo.h:459
uint16_t * intra_matrix
custom intra quantization matrix
Definition: avcodec.h:2074
h261codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:241
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:67
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:346
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced b-frame encoding.
Definition: mpegvideo.h:262
int(* pix_norm1)(uint8_t *pix, int line_size)
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegpicture.h:82
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:105
#define FF_DEFAULT_QUANT_BIAS
Definition: avcodec.h:2031
struct AVFrame * f
Definition: mpegpicture.h:46
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:132
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:54
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:142
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:374
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
#define STRIDE_ALIGN
Definition: internal.h:71
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:116
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1361
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: utils.c:1804
int frame_skip_exp
frame skip exponent
Definition: avcodec.h:2709
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:113
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int f_code
forward MV resolution
Definition: mpegvideo.h:245
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1080
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:115
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
#define MAX_FCODE
Definition: mpegvideo.h:63
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1310
#define MV_DIR_FORWARD
Definition: mpegvideo.h:270
uint16_t * inter_matrix
custom inter quantization matrix
Definition: avcodec.h:2081
int max_b_frames
max number of b-frames for encoding
Definition: mpegvideo.h:122
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:219
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:245
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int last_mv_dir
last mv_dir, used for b frame encoding
Definition: mpegvideo.h:455
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:265
int h263_pred
use mpeg4/h263 ac/dc predictions
Definition: mpegvideo.h:112
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:259
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:465
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:1880
static int64_t pts
Global timestamp for the audio frames.
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:1859
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:263
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
Definition: mpegvideo.h:260
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:747
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:196
uint8_t level
Definition: svq3.c:150
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:2065
me_cmp_func sad[6]
Definition: me_cmp.h:56
int64_t mc_mb_var_sum_temp
Definition: motion_est.h:84
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:284
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
Definition: mpegvideo.h:256
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:138
me_cmp_func sse[6]
Definition: me_cmp.h:57
int noise_reduction
noise reduction strength
Definition: avcodec.h:2096
static int estimate_motion_thread(AVCodecContext *c, void *arg)
int vbv_ignore_qmax
Definition: mpegvideo.h:542
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:88
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:190
char * rc_eq
Definition: mpegvideo.h:544
int8_t * qscale_table
Definition: mpegpicture.h:50
#define MAX_RUN
Definition: rl.h:34
struct AVCodecContext * avctx
Definition: mpegvideo.h:105
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1717
PutBitContext pb
bit output
Definition: mpegvideo.h:158
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:291
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1320
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1319
volatile int error_count
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:1931
int quantizer_noise_shaping
Definition: mpegvideo.h:529
int(* dct_error_sum)[64]
Definition: mpegvideo.h:339
MECmpContext mecc
Definition: mpegvideo.h:238
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:67
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
float rc_initial_cplx
Definition: mpegvideo.h:538
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:82
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:2067
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:137
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
if(ret< 0)
Definition: vf_mcdeint.c:280
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:106
uint8_t * dest[3]
Definition: mpegvideo.h:303
int shared
Definition: mpegpicture.h:88
static double c[64]
int last_pict_type
Definition: mpegvideo.h:221
#define CONFIG_H261_ENCODER
Definition: config.h:1309
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:448
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:216
static int16_t basis[64][64]
attribute_deprecated float border_masking
Definition: avcodec.h:2138
static int score_tab[256]
Definition: zmbvenc.c:59
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:169
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:189
Bi-dir predicted.
Definition: avutil.h:268
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:81
float rc_qsquish
ratecontrol qmin qmax limiting method 0-> clipping, 1-> use a nice continuous function to limit qscal...
Definition: mpegvideo.h:535
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideo.h:155
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:3034
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:326
#define H263_GOB_HEIGHT(h)
Definition: h263.h:44
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
denominator
Definition: rational.h:45
#define CONFIG_H263_ENCODER
Definition: config.h:1310
attribute_deprecated float rc_qmod_amp
Definition: avcodec.h:2581
#define CONFIG_H263P_ENCODER
Definition: config.h:1311
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (h263)
Definition: mpegvideo.h:197
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:446
int trellis
trellis RD quantization
Definition: avcodec.h:2723
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:510
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:636
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:755
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:425
void ff_mpeg4_stuffing(PutBitContext *pbc)
add mpeg4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:100
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:501
#define STOP_TIMER(id)
Definition: timer.h:93
int slices
Number of slices.
Definition: avcodec.h:2263
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:1554
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:82
#define PICT_FRAME
Definition: mpegutils.h:35
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:360
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:867
void(* diff_pixels)(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride)
Definition: pixblockdsp.h:30
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int picture_structure
Definition: mpegvideo.h:463
int dia_size
ME diamond size & shape.
Definition: avcodec.h:1960
#define av_free(p)
int b_sensitivity
Adjust sensitivity of b_frame_strategy 1.
Definition: avcodec.h:2219
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:3083
VideoDSPContext vdsp
Definition: mpegvideo.h:243
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1617
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:364
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1562
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:500
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:179
int64_t bit_rate
wanted bit rate
Definition: mpegvideo.h:110
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:413
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:175
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:237
attribute_deprecated int error_rate
Definition: avcodec.h:3255
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:71
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:219
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1720
Picture ** reordered_input_picture
pointer to the next pictures in codedorder for encoding
Definition: mpegvideo.h:145
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:147
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1432
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:512
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:625
int height
Definition: frame.h:220
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:525
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:308
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:123
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void INT64 start
Definition: avisynth_c.h:553
#define av_always_inline
Definition: attributes.h:37
#define M_PI
Definition: mathematics.h:46
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
#define AV_CODEC_FLAG_CLOSED_GOP
Allow non spec compliant speedup tricks.
Definition: avcodec.h:801
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:79
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:314
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
attribute_deprecated int lmin
Definition: avcodec.h:2681
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:107
#define stride
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:303
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:524
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
Definition: mpegvideo.h:275
int rc_min_rate
minimum bitrate
Definition: avcodec.h:2621
int b_code
backward MV resolution for B Frames (mpeg4)
Definition: mpegvideo.h:246
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:381
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
int dct_count[2]
Definition: mpegvideo.h:340
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegpicture.h:81
static int encode_frame(AVCodecContext *c, AVFrame *frame)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:65
This structure stores compressed data.
Definition: avcodec.h:1410
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:146
int delay
Codec delay.
Definition: avcodec.h:1674
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2830
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1426
int ff_check_alignment(void)
Definition: me_cmp.c:915
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:554
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:141
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:3636
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:91
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:240
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:267
unsigned int lambda
lagrange multipler used in rate distortion
Definition: mpegvideo.h:213
AVCodec ff_msmpeg4v2_encoder
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:398
enum idct_permutation_type perm_type
Definition: idctdsp.h:95
HpelDSPContext hdsp
Definition: mpegvideo.h:236
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:347
static int16_t block[64]
Definition: dct-test.c:110