FFmpeg  3.4.9
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
67 
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
114 
115 static FILE *vstats_file;
116 
117 const char *const forced_keyframes_const_names[] = {
118  "n",
119  "n_forced",
120  "prev_forced_n",
121  "prev_forced_t",
122  "t",
123  NULL
124 };
125 
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
130 
static int run_as_daemon  = 0;
static int nb_frames_dup = 0;
static unsigned dup_warning = 1000;
static int nb_frames_drop = 0;
static int64_t decode_error_stat[2];

static int want_sdp = 1;

/* Timestamp (microseconds, from getutime()) of the previous
 * update_benchmark() call; must be 64-bit so the int64_t CPU-time
 * value stored into it is not truncated. */
static int64_t current_time;
141 
143 
148 
153 
156 
157 #if HAVE_TERMIOS_H
158 
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
162 #endif
163 
164 #if HAVE_PTHREADS
165 static void free_input_threads(void);
166 #endif
167 
168 /* sub2video hack:
169  Convert subtitles to video with alpha to insert them in filter graphs.
170  This is a temporary solution until libavfilter gets real subtitles support.
171  */
172 
174 {
175  int ret;
176  AVFrame *frame = ist->sub2video.frame;
177 
178  av_frame_unref(frame);
179  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
182  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
183  return ret;
184  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
185  return 0;
186 }
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
220 {
221  AVFrame *frame = ist->sub2video.frame;
222  int i;
223 
224  av_assert1(frame->data[0]);
225  ist->sub2video.last_pts = frame->pts = pts;
226  for (i = 0; i < ist->nb_filters; i++)
230 }
231 
233 {
234  AVFrame *frame = ist->sub2video.frame;
235  int8_t *dst;
236  int dst_linesize;
237  int num_rects, i;
238  int64_t pts, end_pts;
239 
240  if (!frame)
241  return;
242  if (sub) {
243  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244  AV_TIME_BASE_Q, ist->st->time_base);
245  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246  AV_TIME_BASE_Q, ist->st->time_base);
247  num_rects = sub->num_rects;
248  } else {
249  pts = ist->sub2video.end_pts;
250  end_pts = INT64_MAX;
251  num_rects = 0;
252  }
253  if (sub2video_get_blank_frame(ist) < 0) {
255  "Impossible to get a blank canvas.\n");
256  return;
257  }
258  dst = frame->data [0];
259  dst_linesize = frame->linesize[0];
260  for (i = 0; i < num_rects; i++)
261  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262  sub2video_push_ref(ist, pts);
263  ist->sub2video.end_pts = end_pts;
264 }
265 
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267 {
268  InputFile *infile = input_files[ist->file_index];
269  int i, j, nb_reqs;
270  int64_t pts2;
271 
272  /* When a frame is read from a file, examine all sub2video streams in
273  the same file and send the sub2video frame again. Otherwise, decoded
274  video frames could be accumulating in the filter graph while a filter
275  (possibly overlay) is desperately waiting for a subtitle frame. */
276  for (i = 0; i < infile->nb_streams; i++) {
277  InputStream *ist2 = input_streams[infile->ist_index + i];
278  if (!ist2->sub2video.frame)
279  continue;
280  /* subtitles seem to be usually muxed ahead of other streams;
281  if not, subtracting a larger time here is necessary */
282  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283  /* do not send the heartbeat frame if the subtitle is already ahead */
284  if (pts2 <= ist2->sub2video.last_pts)
285  continue;
286  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287  sub2video_update(ist2, NULL);
288  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290  if (nb_reqs)
291  sub2video_push_ref(ist2, pts2);
292  }
293 }
294 
295 static void sub2video_flush(InputStream *ist)
296 {
297  int i;
298 
299  if (ist->sub2video.end_pts < INT64_MAX)
300  sub2video_update(ist, NULL);
301  for (i = 0; i < ist->nb_filters; i++)
303 }
304 
305 /* end of sub2video hack */
306 
/* Restore the saved terminal attributes. Uses only async-signal-safe
 * calls, so it may run from sigterm_handler(). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty)
        tcsetattr(0, TCSANOW, &oldtty);
#endif
}
314 
315 void term_exit(void)
316 {
317  av_log(NULL, AV_LOG_QUIET, "%s", "");
319 }
320 
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
326 
327 static void
329 {
330  received_sigterm = sig;
333  if(received_nb_signals > 3) {
334  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335  strlen("Received > 3 system signals, hard exiting\n"));
336 
337  exit(123);
338  }
339 }
340 
341 #if HAVE_SETCONSOLECTRLHANDLER
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
343 {
344  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345 
346  switch (fdwCtrlType)
347  {
348  case CTRL_C_EVENT:
349  case CTRL_BREAK_EVENT:
350  sigterm_handler(SIGINT);
351  return TRUE;
352 
353  case CTRL_CLOSE_EVENT:
354  case CTRL_LOGOFF_EVENT:
355  case CTRL_SHUTDOWN_EVENT:
356  sigterm_handler(SIGTERM);
357  /* Basically, with these 3 events, when we return from this method the
358  process is hard terminated, so stall as long as we need to
359  to try and let the main thread(s) clean up and gracefully terminate
360  (we have at most 5 seconds, but should be done far before that). */
361  while (!ffmpeg_exited) {
362  Sleep(0);
363  }
364  return TRUE;
365 
366  default:
367  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
368  return FALSE;
369  }
370 }
371 #endif
372 
373 void term_init(void)
374 {
375 #if HAVE_TERMIOS_H
377  struct termios tty;
378  if (tcgetattr (0, &tty) == 0) {
379  oldtty = tty;
380  restore_tty = 1;
381 
382  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383  |INLCR|IGNCR|ICRNL|IXON);
384  tty.c_oflag |= OPOST;
385  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386  tty.c_cflag &= ~(CSIZE|PARENB);
387  tty.c_cflag |= CS8;
388  tty.c_cc[VMIN] = 1;
389  tty.c_cc[VTIME] = 0;
390 
391  tcsetattr (0, TCSANOW, &tty);
392  }
393  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394  }
395 #endif
396 
397  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 #ifdef SIGXCPU
400  signal(SIGXCPU, sigterm_handler);
401 #endif
402 #if HAVE_SETCONSOLECTRLHANDLER
403  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 #endif
405 }
406 
407 /* read a key without blocking */
/* Poll stdin for a single key press without blocking.
 * Returns the character read, -1 if no input is available, or the (<=0)
 * result of read() on failure/EOF. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval no_wait;
    fd_set stdin_set;
    int avail;

    FD_ZERO(&stdin_set);
    FD_SET(0, &stdin_set);
    no_wait.tv_sec  = 0;
    no_wait.tv_usec = 0;
    /* Zero timeout: select() only reports whether data is pending. */
    avail = select(1, &stdin_set, NULL, NULL, &no_wait);
    if (avail > 0) {
        avail = read(0, &ch, 1);
        return avail == 1 ? ch : avail;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
458 
459 static int decode_interrupt_cb(void *ctx)
460 {
462 }
463 
465 
466 static void ffmpeg_cleanup(int ret)
467 {
468  int i, j;
469 
470  if (do_benchmark) {
471  int maxrss = getmaxrss() / 1024;
472  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
473  }
474 
475  for (i = 0; i < nb_filtergraphs; i++) {
476  FilterGraph *fg = filtergraphs[i];
478  for (j = 0; j < fg->nb_inputs; j++) {
479  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
480  AVFrame *frame;
481  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482  sizeof(frame), NULL);
483  av_frame_free(&frame);
484  }
485  av_fifo_freep(&fg->inputs[j]->frame_queue);
486  if (fg->inputs[j]->ist->sub2video.sub_queue) {
487  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
488  AVSubtitle sub;
490  &sub, sizeof(sub), NULL);
491  avsubtitle_free(&sub);
492  }
494  }
496  av_freep(&fg->inputs[j]->name);
497  av_freep(&fg->inputs[j]);
498  }
499  av_freep(&fg->inputs);
500  for (j = 0; j < fg->nb_outputs; j++) {
501  av_freep(&fg->outputs[j]->name);
502  av_freep(&fg->outputs[j]->formats);
503  av_freep(&fg->outputs[j]->channel_layouts);
504  av_freep(&fg->outputs[j]->sample_rates);
505  av_freep(&fg->outputs[j]);
506  }
507  av_freep(&fg->outputs);
508  av_freep(&fg->graph_desc);
509 
510  av_freep(&filtergraphs[i]);
511  }
512  av_freep(&filtergraphs);
513 
515 
516  /* close files */
517  for (i = 0; i < nb_output_files; i++) {
518  OutputFile *of = output_files[i];
520  if (!of)
521  continue;
522  s = of->ctx;
523  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
524  avio_closep(&s->pb);
526  av_dict_free(&of->opts);
527 
528  av_freep(&output_files[i]);
529  }
530  for (i = 0; i < nb_output_streams; i++) {
531  OutputStream *ost = output_streams[i];
532 
533  if (!ost)
534  continue;
535 
536  for (j = 0; j < ost->nb_bitstream_filters; j++)
537  av_bsf_free(&ost->bsf_ctx[j]);
538  av_freep(&ost->bsf_ctx);
539 
541  av_frame_free(&ost->last_frame);
542  av_dict_free(&ost->encoder_opts);
543 
544  av_parser_close(ost->parser);
546 
547  av_freep(&ost->forced_keyframes);
549  av_freep(&ost->avfilter);
550  av_freep(&ost->logfile_prefix);
551 
553  ost->audio_channels_mapped = 0;
554 
555  av_dict_free(&ost->sws_dict);
556  av_dict_free(&ost->swr_opts);
557 
560 
561  if (ost->muxing_queue) {
562  while (av_fifo_size(ost->muxing_queue)) {
563  AVPacket pkt;
564  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
565  av_packet_unref(&pkt);
566  }
568  }
569 
570  av_freep(&output_streams[i]);
571  }
572 #if HAVE_PTHREADS
573  free_input_threads();
574 #endif
575  for (i = 0; i < nb_input_files; i++) {
576  avformat_close_input(&input_files[i]->ctx);
577  av_freep(&input_files[i]);
578  }
579  for (i = 0; i < nb_input_streams; i++) {
580  InputStream *ist = input_streams[i];
581 
584  av_dict_free(&ist->decoder_opts);
587  av_freep(&ist->filters);
588  av_freep(&ist->hwaccel_device);
589  av_freep(&ist->dts_buffer);
590 
592 
593  av_freep(&input_streams[i]);
594  }
595 
596  if (vstats_file) {
597  if (fclose(vstats_file))
599  "Error closing vstats file, loss of information possible: %s\n",
600  av_err2str(AVERROR(errno)));
601  }
603 
604  av_freep(&input_streams);
605  av_freep(&input_files);
606  av_freep(&output_streams);
607  av_freep(&output_files);
608 
609  uninit_opts();
610 
612 
613  if (received_sigterm) {
614  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
615  (int) received_sigterm);
616  } else if (ret && atomic_load(&transcode_init_done)) {
617  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
618  }
619  term_exit();
620  ffmpeg_exited = 1;
621 }
622 
624 {
625  AVDictionaryEntry *t = NULL;
626 
627  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
629  }
630 }
631 
633 {
635  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
636  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
637  exit_program(1);
638  }
639 }
640 
641 static void abort_codec_experimental(AVCodec *c, int encoder)
642 {
643  exit_program(1);
644 }
645 
646 static void update_benchmark(const char *fmt, ...)
647 {
648  if (do_benchmark_all) {
649  int64_t t = getutime();
650  va_list va;
651  char buf[1024];
652 
653  if (fmt) {
654  va_start(va, fmt);
655  vsnprintf(buf, sizeof(buf), fmt, va);
656  va_end(va);
657  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
658  }
659  current_time = t;
660  }
661 }
662 
663 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
664 {
665  int i;
666  for (i = 0; i < nb_output_streams; i++) {
667  OutputStream *ost2 = output_streams[i];
668  ost2->finished |= ost == ost2 ? this_stream : others;
669  }
670 }
671 
672 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
673 {
674  AVFormatContext *s = of->ctx;
675  AVStream *st = ost->st;
676  int ret;
677 
678  /*
679  * Audio encoders may split the packets -- #frames in != #packets out.
680  * But there is no reordering, so we can limit the number of output packets
681  * by simply dropping them here.
682  * Counting encoded video frames needs to be done separately because of
683  * reordering, see do_video_out().
684  * Do not count the packet when unqueued because it has been counted when queued.
685  */
686  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
687  if (ost->frame_number >= ost->max_frames) {
688  av_packet_unref(pkt);
689  return;
690  }
691  ost->frame_number++;
692  }
693 
694  if (!of->header_written) {
695  AVPacket tmp_pkt = {0};
696  /* the muxer is not initialized yet, buffer the packet */
697  if (!av_fifo_space(ost->muxing_queue)) {
698  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
699  ost->max_muxing_queue_size);
700  if (new_size <= av_fifo_size(ost->muxing_queue)) {
702  "Too many packets buffered for output stream %d:%d.\n",
703  ost->file_index, ost->st->index);
704  exit_program(1);
705  }
706  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
707  if (ret < 0)
708  exit_program(1);
709  }
710  ret = av_packet_ref(&tmp_pkt, pkt);
711  if (ret < 0)
712  exit_program(1);
713  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
714  av_packet_unref(pkt);
715  return;
716  }
717 
720  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
721 
722  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
723  int i;
725  NULL);
726  ost->quality = sd ? AV_RL32(sd) : -1;
727  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
728 
729  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
730  if (sd && i < sd[5])
731  ost->error[i] = AV_RL64(sd + 8 + 8*i);
732  else
733  ost->error[i] = -1;
734  }
735 
736  if (ost->frame_rate.num && ost->is_cfr) {
737  if (pkt->duration > 0)
738  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
739  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
740  ost->mux_timebase);
741  }
742  }
743 
744  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
745 
746  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
747  if (pkt->dts != AV_NOPTS_VALUE &&
748  pkt->pts != AV_NOPTS_VALUE &&
749  pkt->dts > pkt->pts) {
750  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
751  pkt->dts, pkt->pts,
752  ost->file_index, ost->st->index);
753  pkt->pts =
754  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
755  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
756  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
757  }
759  pkt->dts != AV_NOPTS_VALUE &&
760  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
761  ost->last_mux_dts != AV_NOPTS_VALUE) {
762  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
763  if (pkt->dts < max) {
764  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
765  av_log(s, loglevel, "Non-monotonous DTS in output stream "
766  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
767  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
768  if (exit_on_error) {
769  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
770  exit_program(1);
771  }
772  av_log(s, loglevel, "changing to %"PRId64". This may result "
773  "in incorrect timestamps in the output file.\n",
774  max);
775  if (pkt->pts >= pkt->dts)
776  pkt->pts = FFMAX(pkt->pts, max);
777  pkt->dts = max;
778  }
779  }
780  }
781  ost->last_mux_dts = pkt->dts;
782 
783  ost->data_size += pkt->size;
784  ost->packets_written++;
785 
786  pkt->stream_index = ost->index;
787 
788  if (debug_ts) {
789  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
790  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
792  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
793  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
794  pkt->size
795  );
796  }
797 
798  ret = av_interleaved_write_frame(s, pkt);
799  if (ret < 0) {
800  print_error("av_interleaved_write_frame()", ret);
801  main_return_code = 1;
803  }
804  av_packet_unref(pkt);
805 }
806 
808 {
809  OutputFile *of = output_files[ost->file_index];
810 
811  ost->finished |= ENCODER_FINISHED;
812  if (of->shortest) {
813  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
814  of->recording_time = FFMIN(of->recording_time, end);
815  }
816 }
817 
818 /*
819  * Send a single packet to the output, applying any bitstream filters
820  * associated with the output stream. This may result in any number
821  * of packets actually being written, depending on what bitstream
822  * filters are applied. The supplied packet is consumed and will be
823  * blank (as if newly-allocated) when this function returns.
824  *
825  * If eof is set, instead indicate EOF to all bitstream filters and
826  * therefore flush any delayed packets to the output. A blank packet
827  * must be supplied in this case.
828  */
830  OutputStream *ost, int eof)
831 {
832  int ret = 0;
833 
834  /* apply the output bitstream filters, if any */
835  if (ost->nb_bitstream_filters) {
836  int idx;
837 
838  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
839  if (ret < 0)
840  goto finish;
841 
842  eof = 0;
843  idx = 1;
844  while (idx) {
845  /* get a packet from the previous filter up the chain */
846  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
847  if (ret == AVERROR(EAGAIN)) {
848  ret = 0;
849  idx--;
850  continue;
851  } else if (ret == AVERROR_EOF) {
852  eof = 1;
853  } else if (ret < 0)
854  goto finish;
855 
856  /* send it to the next filter down the chain or to the muxer */
857  if (idx < ost->nb_bitstream_filters) {
858  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
859  if (ret < 0)
860  goto finish;
861  idx++;
862  eof = 0;
863  } else if (eof)
864  goto finish;
865  else
866  write_packet(of, pkt, ost, 0);
867  }
868  } else if (!eof)
869  write_packet(of, pkt, ost, 0);
870 
871 finish:
872  if (ret < 0 && ret != AVERROR_EOF) {
873  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
874  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
875  if(exit_on_error)
876  exit_program(1);
877  }
878 }
879 
881 {
882  OutputFile *of = output_files[ost->file_index];
883 
884  if (of->recording_time != INT64_MAX &&
886  AV_TIME_BASE_Q) >= 0) {
887  close_output_stream(ost);
888  return 0;
889  }
890  return 1;
891 }
892 
893 static void do_audio_out(OutputFile *of, OutputStream *ost,
894  AVFrame *frame)
895 {
896  AVCodecContext *enc = ost->enc_ctx;
897  AVPacket pkt;
898  int ret;
899 
900  av_init_packet(&pkt);
901  pkt.data = NULL;
902  pkt.size = 0;
903 
904  if (!check_recording_time(ost))
905  return;
906 
907  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
908  frame->pts = ost->sync_opts;
909  ost->sync_opts = frame->pts + frame->nb_samples;
910  ost->samples_encoded += frame->nb_samples;
911  ost->frames_encoded++;
912 
913  av_assert0(pkt.size || !pkt.data);
915  if (debug_ts) {
916  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
917  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
918  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
919  enc->time_base.num, enc->time_base.den);
920  }
921 
922  ret = avcodec_send_frame(enc, frame);
923  if (ret < 0)
924  goto error;
925 
926  while (1) {
927  ret = avcodec_receive_packet(enc, &pkt);
928  if (ret == AVERROR(EAGAIN))
929  break;
930  if (ret < 0)
931  goto error;
932 
933  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
934 
935  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
936 
937  if (debug_ts) {
938  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
939  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
940  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
941  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
942  }
943 
944  output_packet(of, &pkt, ost, 0);
945  }
946 
947  return;
948 error:
949  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
950  exit_program(1);
951 }
952 
953 static void do_subtitle_out(OutputFile *of,
954  OutputStream *ost,
955  AVSubtitle *sub)
956 {
957  int subtitle_out_max_size = 1024 * 1024;
958  int subtitle_out_size, nb, i;
959  AVCodecContext *enc;
960  AVPacket pkt;
961  int64_t pts;
962 
963  if (sub->pts == AV_NOPTS_VALUE) {
964  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
965  if (exit_on_error)
966  exit_program(1);
967  return;
968  }
969 
970  enc = ost->enc_ctx;
971 
972  if (!subtitle_out) {
973  subtitle_out = av_malloc(subtitle_out_max_size);
974  if (!subtitle_out) {
975  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
976  exit_program(1);
977  }
978  }
979 
980  /* Note: DVB subtitle need one packet to draw them and one other
981  packet to clear them */
982  /* XXX: signal it in the codec context ? */
984  nb = 2;
985  else
986  nb = 1;
987 
988  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
989  pts = sub->pts;
990  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
991  pts -= output_files[ost->file_index]->start_time;
992  for (i = 0; i < nb; i++) {
993  unsigned save_num_rects = sub->num_rects;
994 
995  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
996  if (!check_recording_time(ost))
997  return;
998 
999  sub->pts = pts;
1000  // start_display_time is required to be 0
1001  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1002  sub->end_display_time -= sub->start_display_time;
1003  sub->start_display_time = 0;
1004  if (i == 1)
1005  sub->num_rects = 0;
1006 
1007  ost->frames_encoded++;
1008 
1009  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1010  subtitle_out_max_size, sub);
1011  if (i == 1)
1012  sub->num_rects = save_num_rects;
1013  if (subtitle_out_size < 0) {
1014  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1015  exit_program(1);
1016  }
1017 
1018  av_init_packet(&pkt);
1019  pkt.data = subtitle_out;
1020  pkt.size = subtitle_out_size;
1021  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1022  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1023  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1024  /* XXX: the pts correction is handled here. Maybe handling
1025  it in the codec would be better */
1026  if (i == 0)
1027  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1028  else
1029  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030  }
1031  pkt.dts = pkt.pts;
1032  output_packet(of, &pkt, ost, 0);
1033  }
1034 }
1035 
1036 static void do_video_out(OutputFile *of,
1037  OutputStream *ost,
1038  AVFrame *next_picture,
1039  double sync_ipts)
1040 {
1041  int ret, format_video_sync;
1042  AVPacket pkt;
1043  AVCodecContext *enc = ost->enc_ctx;
1044  AVCodecParameters *mux_par = ost->st->codecpar;
1045  AVRational frame_rate;
1046  int nb_frames, nb0_frames, i;
1047  double delta, delta0;
1048  double duration = 0;
1049  int frame_size = 0;
1050  InputStream *ist = NULL;
1052 
1053  if (ost->source_index >= 0)
1054  ist = input_streams[ost->source_index];
1055 
1056  frame_rate = av_buffersink_get_frame_rate(filter);
1057  if (frame_rate.num > 0 && frame_rate.den > 0)
1058  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1059 
1060  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1061  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1062 
1063  if (!ost->filters_script &&
1064  !ost->filters &&
1065  next_picture &&
1066  ist &&
1067  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1068  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1069  }
1070 
1071  if (!next_picture) {
1072  //end, flushing
1073  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1074  ost->last_nb0_frames[1],
1075  ost->last_nb0_frames[2]);
1076  } else {
1077  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1078  delta = delta0 + duration;
1079 
1080  /* by default, we output a single frame */
1081  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1082  nb_frames = 1;
1083 
1084  format_video_sync = video_sync_method;
1085  if (format_video_sync == VSYNC_AUTO) {
1086  if(!strcmp(of->ctx->oformat->name, "avi")) {
1087  format_video_sync = VSYNC_VFR;
1088  } else
1089  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1090  if ( ist
1091  && format_video_sync == VSYNC_CFR
1092  && input_files[ist->file_index]->ctx->nb_streams == 1
1093  && input_files[ist->file_index]->input_ts_offset == 0) {
1094  format_video_sync = VSYNC_VSCFR;
1095  }
1096  if (format_video_sync == VSYNC_CFR && copy_ts) {
1097  format_video_sync = VSYNC_VSCFR;
1098  }
1099  }
1100  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1101 
1102  if (delta0 < 0 &&
1103  delta > 0 &&
1104  format_video_sync != VSYNC_PASSTHROUGH &&
1105  format_video_sync != VSYNC_DROP) {
1106  if (delta0 < -0.6) {
1107  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1108  } else
1109  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1110  sync_ipts = ost->sync_opts;
1111  duration += delta0;
1112  delta0 = 0;
1113  }
1114 
1115  switch (format_video_sync) {
1116  case VSYNC_VSCFR:
1117  if (ost->frame_number == 0 && delta0 >= 0.5) {
1118  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1119  delta = duration;
1120  delta0 = 0;
1121  ost->sync_opts = lrint(sync_ipts);
1122  }
1123  case VSYNC_CFR:
1124  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1125  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1126  nb_frames = 0;
1127  } else if (delta < -1.1)
1128  nb_frames = 0;
1129  else if (delta > 1.1) {
1130  nb_frames = lrintf(delta);
1131  if (delta0 > 1.1)
1132  nb0_frames = lrintf(delta0 - 0.6);
1133  }
1134  break;
1135  case VSYNC_VFR:
1136  if (delta <= -0.6)
1137  nb_frames = 0;
1138  else if (delta > 0.6)
1139  ost->sync_opts = lrint(sync_ipts);
1140  break;
1141  case VSYNC_DROP:
1142  case VSYNC_PASSTHROUGH:
1143  ost->sync_opts = lrint(sync_ipts);
1144  break;
1145  default:
1146  av_assert0(0);
1147  }
1148  }
1149 
1150  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1151  nb0_frames = FFMIN(nb0_frames, nb_frames);
1152 
1153  memmove(ost->last_nb0_frames + 1,
1154  ost->last_nb0_frames,
1155  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1156  ost->last_nb0_frames[0] = nb0_frames;
1157 
1158  if (nb0_frames == 0 && ost->last_dropped) {
1159  nb_frames_drop++;
1161  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1162  ost->frame_number, ost->st->index, ost->last_frame->pts);
1163  }
1164  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1165  if (nb_frames > dts_error_threshold * 30) {
1166  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1167  nb_frames_drop++;
1168  return;
1169  }
1170  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1171  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1172  if (nb_frames_dup > dup_warning) {
1173  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1174  dup_warning *= 10;
1175  }
1176  }
1177  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1178 
1179  /* duplicates frame if needed */
1180  for (i = 0; i < nb_frames; i++) {
1181  AVFrame *in_picture;
1182  av_init_packet(&pkt);
1183  pkt.data = NULL;
1184  pkt.size = 0;
1185 
1186  if (i < nb0_frames && ost->last_frame) {
1187  in_picture = ost->last_frame;
1188  } else
1189  in_picture = next_picture;
1190 
1191  if (!in_picture)
1192  return;
1193 
1194  in_picture->pts = ost->sync_opts;
1195 
1196 #if 1
1197  if (!check_recording_time(ost))
1198 #else
1199  if (ost->frame_number >= ost->max_frames)
1200 #endif
1201  return;
1202 
1203 #if FF_API_LAVF_FMT_RAWPICTURE
1204  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1205  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1206  /* raw pictures are written as AVPicture structure to
1207  avoid any copies. We support temporarily the older
1208  method. */
1209  if (in_picture->interlaced_frame)
1210  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1211  else
1212  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1213  pkt.data = (uint8_t *)in_picture;
1214  pkt.size = sizeof(AVPicture);
1215  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1216  pkt.flags |= AV_PKT_FLAG_KEY;
1217 
1218  output_packet(of, &pkt, ost, 0);
1219  } else
1220 #endif
1221  {
1222  int forced_keyframe = 0;
1223  double pts_time;
1224 
1226  ost->top_field_first >= 0)
1227  in_picture->top_field_first = !!ost->top_field_first;
1228 
1229  if (in_picture->interlaced_frame) {
1230  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1231  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1232  else
1233  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1234  } else
1235  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1236 
1237  in_picture->quality = enc->global_quality;
1238  in_picture->pict_type = 0;
1239 
1240  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1241  in_picture->pts * av_q2d(enc->time_base) : NAN;
1242  if (ost->forced_kf_index < ost->forced_kf_count &&
1243  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1244  ost->forced_kf_index++;
1245  forced_keyframe = 1;
1246  } else if (ost->forced_keyframes_pexpr) {
1247  double res;
1248  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1251  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1257  res);
1258  if (res) {
1259  forced_keyframe = 1;
1265  }
1266 
1268  } else if ( ost->forced_keyframes
1269  && !strncmp(ost->forced_keyframes, "source", 6)
1270  && in_picture->key_frame==1) {
1271  forced_keyframe = 1;
1272  }
1273 
1274  if (forced_keyframe) {
1275  in_picture->pict_type = AV_PICTURE_TYPE_I;
1276  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1277  }
1278 
1280  if (debug_ts) {
1281  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1282  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1283  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1284  enc->time_base.num, enc->time_base.den);
1285  }
1286 
1287  ost->frames_encoded++;
1288 
1289  ret = avcodec_send_frame(enc, in_picture);
1290  if (ret < 0)
1291  goto error;
1292 
1293  while (1) {
1294  ret = avcodec_receive_packet(enc, &pkt);
1295  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1296  if (ret == AVERROR(EAGAIN))
1297  break;
1298  if (ret < 0)
1299  goto error;
1300 
1301  if (debug_ts) {
1302  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1303  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1304  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1305  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1306  }
1307 
1308  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1309  pkt.pts = ost->sync_opts;
1310 
1311  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1312 
1313  if (debug_ts) {
1314  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1315  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1316  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1317  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1318  }
1319 
1320  frame_size = pkt.size;
1321  output_packet(of, &pkt, ost, 0);
1322 
1323  /* if two pass, output log */
1324  if (ost->logfile && enc->stats_out) {
1325  fprintf(ost->logfile, "%s", enc->stats_out);
1326  }
1327  }
1328  }
1329  ost->sync_opts++;
1330  /*
1331  * For video, number of frames in == number of packets out.
1332  * But there may be reordering, so we can't throw away frames on encoder
1333  * flush, we need to limit them here, before they go into encoder.
1334  */
1335  ost->frame_number++;
1336 
1337  if (vstats_filename && frame_size)
1338  do_video_stats(ost, frame_size);
1339  }
1340 
1341  if (!ost->last_frame)
1342  ost->last_frame = av_frame_alloc();
1343  av_frame_unref(ost->last_frame);
1344  if (next_picture && ost->last_frame)
1345  av_frame_ref(ost->last_frame, next_picture);
1346  else
1347  av_frame_free(&ost->last_frame);
1348 
1349  return;
1350 error:
1351  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1352  exit_program(1);
1353 }
1354 
1355 static double psnr(double d)
1356 {
1357  return -10.0 * log10(d);
1358 }
1359 
/*
 * Body of do_video_stats(): append one line of per-frame video statistics
 * (frame number, quantizer, optional PSNR, frame size, running bitrate)
 * to the file named by vstats_filename.
 * NOTE(review): the signature line was lost in extraction; callers in this
 * file invoke it as do_video_stats(ost, frame_size) — confirm against the
 * original source.
 */
1361 {
1362  AVCodecContext *enc;
1363  int frame_number;
1364  double ti1, bitrate, avg_bitrate;
1365 
1366  /* this is executed just the first time do_video_stats is called */
1367  if (!vstats_file) {
1368  vstats_file = fopen(vstats_filename, "w");
1369  if (!vstats_file) {
1370  perror("fopen");
1371  exit_program(1);
1372  }
1373  }
1374 
1375  enc = ost->enc_ctx;
1376  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1377  frame_number = ost->st->nb_frames;
/* vstats_version selects between the legacy (v1) and extended (v2+) line
 * formats; v2 additionally prints the output file and stream indices. */
1378  if (vstats_version <= 1) {
1379  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1380  ost->quality / (float)FF_QP2LAMBDA);
1381  } else {
1382  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1383  ost->quality / (float)FF_QP2LAMBDA);
1384  }
1385 
/* PSNR is only printed when the encoder was asked to collect error stats. */
1386  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1387  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 
1389  fprintf(vstats_file,"f_size= %6d ", frame_size);
1390  /* compute pts value */
1391  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* Clamp tiny/zero timestamps to avoid dividing by ~0 below. */
1392  if (ti1 < 0.01)
1393  ti1 = 0.01;
1394 
1395  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1396  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1397  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1398  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1399  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1400  }
1401 }
1402 
1403 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1404 
/*
 * Body of finish_output_stream(ost): marks the stream as finished; with
 * -shortest, all sibling streams of the same output file are finished too
 * so muxing stops as soon as the shortest stream ends.
 * NOTE(review): the signature line and one statement (original line 1410,
 * presumably marking ost itself finished) were lost in extraction —
 * confirm against the original source.
 */
1406 {
1407  OutputFile *of = output_files[ost->file_index];
1408  int i;
1409 
1411 
1412  if (of->shortest) {
1413  for (i = 0; i < of->ctx->nb_streams; i++)
1414  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1415  }
1416 }
1417 
1418 /**
1419  * Get and encode new output from any of the filtergraphs, without causing
1420  * activity.
1421  *
1422  * @return 0 for success, <0 for severe errors
1423  */
/*
 * Drain every output stream's buffersink and hand the frames to the
 * audio/video encoders. With flush != 0, EOF from a video sink triggers a
 * final do_video_out(NULL) to flush the encoder.
 * Returns 0 on success, AVERROR(ENOMEM) if the reusable frame cannot be
 * allocated.
 * NOTE(review): several interior lines were lost in extraction (original
 * 1433 filter variable declaration, 1459 buffersink flags argument,
 * 1462/1512 av_log level arguments, 1465) — confirm against the original.
 */
1424 static int reap_filters(int flush)
1425 {
1426  AVFrame *filtered_frame = NULL;
1427  int i;
1428 
1429  /* Reap all buffers present in the buffer sinks */
1430  for (i = 0; i < nb_output_streams; i++) {
1431  OutputStream *ost = output_streams[i];
1432  OutputFile *of = output_files[ost->file_index];
1434  AVCodecContext *enc = ost->enc_ctx;
1435  int ret = 0;
1436 
/* Skip streams whose filtergraph is not configured yet. */
1437  if (!ost->filter || !ost->filter->graph->graph)
1438  continue;
1439  filter = ost->filter->filter;
1440 
/* Lazily initialize the output stream the first time we have data for it. */
1441  if (!ost->initialized) {
1442  char error[1024] = "";
1443  ret = init_output_stream(ost, error, sizeof(error));
1444  if (ret < 0) {
1445  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1446  ost->file_index, ost->index, error);
1447  exit_program(1);
1448  }
1449  }
1450 
/* One reusable AVFrame per output stream, allocated on first use. */
1451  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1452  return AVERROR(ENOMEM);
1453  }
1454  filtered_frame = ost->filtered_frame;
1455 
1456  while (1) {
1457  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1458  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1460  if (ret < 0) {
1461  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1463  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1464  } else if (flush && ret == AVERROR_EOF) {
/* On flush + EOF, push a NULL frame so the video encoder drains. */
1466  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1467  }
1468  break;
1469  }
1470  if (ost->finished) {
1471  av_frame_unref(filtered_frame);
1472  continue;
1473  }
/* Rebase the frame pts from the filter timebase to the encoder timebase,
 * subtracting the output-file start time. float_pts keeps extra fractional
 * precision (up to 16 extra bits) for the fps/vsync logic. */
1474  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1475  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1476  AVRational filter_tb = av_buffersink_get_time_base(filter);
1477  AVRational tb = enc->time_base;
1478  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 
1480  tb.den <<= extra_bits;
1481  float_pts =
1482  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1483  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1484  float_pts /= 1 << extra_bits;
1485  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1486  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1487 
1488  filtered_frame->pts =
1489  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1490  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1491  }
1492  //if (ost->source_index >= 0)
1493  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1494 
1495  switch (av_buffersink_get_type(filter)) {
1496  case AVMEDIA_TYPE_VIDEO:
/* Propagate SAR from the filtergraph unless the user forced an aspect. */
1497  if (!ost->frame_aspect_ratio.num)
1498  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1499 
1500  if (debug_ts) {
1501  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1502  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1503  float_pts,
1504  enc->time_base.num, enc->time_base.den);
1505  }
1506 
1507  do_video_out(of, ost, filtered_frame, float_pts);
1508  break;
1509  case AVMEDIA_TYPE_AUDIO:
/* Refuse mid-stream channel-count changes the encoder cannot handle. */
1510  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1511  enc->channels != filtered_frame->channels) {
1513  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1514  break;
1515  }
1516  do_audio_out(of, ost, filtered_frame);
1517  break;
1518  default:
1519  // TODO support subtitle filters
1520  av_assert0(0);
1521  }
1522 
1523  av_frame_unref(filtered_frame);
1524  }
1525  }
1526 
1527  return 0;
1528 }
1529 
/*
 * Print the end-of-run summary: per-type output byte totals, muxing
 * overhead percentage, and (at verbose level) per-stream demux/mux and
 * decode/encode counters for every input and output file. Warns when
 * nothing was encoded at all.
 * NOTE(review): original lines 1549-1550 (the condition that clears
 * pass1_used, apparently based on the encoder pass flags) were lost in
 * extraction — confirm against the original source.
 */
1530 static void print_final_stats(int64_t total_size)
1531 {
1532  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1533  uint64_t subtitle_size = 0;
1534  uint64_t data_size = 0;
1535  float percent = -1.0;
1536  int i, j;
1537  int pass1_used = 1;
1538 
/* Accumulate written bytes per media type across all output streams. */
1539  for (i = 0; i < nb_output_streams; i++) {
1540  OutputStream *ost = output_streams[i];
1541  switch (ost->enc_ctx->codec_type) {
1542  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1543  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1544  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1545  default: other_size += ost->data_size; break;
1546  }
1547  extra_size += ost->enc_ctx->extradata_size;
1548  data_size += ost->data_size;
1551  pass1_used = 0;
1552  }
1553 
/* Muxing overhead = container bytes beyond the raw stream payloads. */
1554  if (data_size && total_size>0 && total_size >= data_size)
1555  percent = 100.0 * (total_size - data_size) / data_size;
1556 
1557  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1558  video_size / 1024.0,
1559  audio_size / 1024.0,
1560  subtitle_size / 1024.0,
1561  other_size / 1024.0,
1562  extra_size / 1024.0);
1563  if (percent >= 0.0)
1564  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1565  else
1566  av_log(NULL, AV_LOG_INFO, "unknown");
1567  av_log(NULL, AV_LOG_INFO, "\n");
1568 
1569  /* print verbose per-stream stats */
1570  for (i = 0; i < nb_input_files; i++) {
1571  InputFile *f = input_files[i];
1572  uint64_t total_packets = 0, total_size = 0;
1573 
1574  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1575  i, f->ctx->filename);
1576 
1577  for (j = 0; j < f->nb_streams; j++) {
1578  InputStream *ist = input_streams[f->ist_index + j];
1579  enum AVMediaType type = ist->dec_ctx->codec_type;
1580 
1581  total_size += ist->data_size;
1582  total_packets += ist->nb_packets;
1583 
1584  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1585  i, j, media_type_string(type));
1586  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1587  ist->nb_packets, ist->data_size);
1588 
1589  if (ist->decoding_needed) {
1590  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1591  ist->frames_decoded);
1592  if (type == AVMEDIA_TYPE_AUDIO)
1593  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1594  av_log(NULL, AV_LOG_VERBOSE, "; ");
1595  }
1596 
1597  av_log(NULL, AV_LOG_VERBOSE, "\n");
1598  }
1599 
1600  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1601  total_packets, total_size);
1602  }
1603 
1604  for (i = 0; i < nb_output_files; i++) {
1605  OutputFile *of = output_files[i];
1606  uint64_t total_packets = 0, total_size = 0;
1607 
1608  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1609  i, of->ctx->filename);
1610 
1611  for (j = 0; j < of->ctx->nb_streams; j++) {
1612  OutputStream *ost = output_streams[of->ost_index + j];
1613  enum AVMediaType type = ost->enc_ctx->codec_type;
1614 
1615  total_size += ost->data_size;
1616  total_packets += ost->packets_written;
1617 
1618  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1619  i, j, media_type_string(type));
1620  if (ost->encoding_needed) {
1621  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1622  ost->frames_encoded);
1623  if (type == AVMEDIA_TYPE_AUDIO)
1624  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1625  av_log(NULL, AV_LOG_VERBOSE, "; ");
1626  }
1627 
1628  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1629  ost->packets_written, ost->data_size);
1630 
1631  av_log(NULL, AV_LOG_VERBOSE, "\n");
1632  }
1633 
1634  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1635  total_packets, total_size);
1636  }
/* An all-zero total usually means -ss/-t/-frames trimmed everything away,
 * unless this was a deliberate first pass of two-pass encoding. */
1637  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1638  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1639  if (pass1_used) {
1640  av_log(NULL, AV_LOG_WARNING, "\n");
1641  } else {
1642  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1643  }
1644  }
1645 }
1646 
/*
 * Emit the periodic status line ("frame= ... fps= ... bitrate= ...") to
 * stderr/log and, when -progress is in use, a machine-readable key=value
 * block to progress_avio. Rate-limited to one update per 500ms unless
 * is_last_report is set, in which case final stats are printed too.
 * NOTE(review): original lines 1756 (guard before the end-pts FFMAX),
 * 1797/1799 (dup/drop guard and arguments) and 1830 (log level of the
 * progress-close error) were lost in extraction — confirm against the
 * original source.
 */
1647 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1648 {
1649  char buf[1024];
1650  AVBPrint buf_script;
1651  OutputStream *ost;
1652  AVFormatContext *oc;
1653  int64_t total_size;
1654  AVCodecContext *enc;
1655  int frame_number, vid, i;
1656  double bitrate;
1657  double speed;
1658  int64_t pts = INT64_MIN + 1;
1659  static int64_t last_time = -1;
1660  static int qp_histogram[52];
1661  int hours, mins, secs, us;
1662  int ret;
1663  float t;
1664 
1665  if (!print_stats && !is_last_report && !progress_avio)
1666  return;
1667 
/* Throttle intermediate reports to at most one every 500ms. */
1668  if (!is_last_report) {
1669  if (last_time == -1) {
1670  last_time = cur_time;
1671  return;
1672  }
1673  if ((cur_time - last_time) < 500000)
1674  return;
1675  last_time = cur_time;
1676  }
1677 
/* Wall-clock seconds since transcoding started. */
1678  t = (cur_time-timer_start) / 1000000.0;
1679 
1680 
1681  oc = output_files[0]->ctx;
1682 
1683  total_size = avio_size(oc->pb);
1684  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1685  total_size = avio_tell(oc->pb);
1686 
1687  buf[0] = '\0';
1688  vid = 0;
1689  av_bprint_init(&buf_script, 0, 1);
1690  for (i = 0; i < nb_output_streams; i++) {
1691  float q = -1;
1692  ost = output_streams[i];
1693  enc = ost->enc_ctx;
1694  if (!ost->stream_copy)
1695  q = ost->quality / (float) FF_QP2LAMBDA;
1696 
/* Only the first video stream drives frame/fps; later ones just add q=. */
1697  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1698  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1699  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1700  ost->file_index, ost->index, q);
1701  }
1702  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1703  float fps;
1704 
1705  frame_number = ost->frame_number;
1706  fps = t > 1 ? frame_number / t : 0;
1707  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1708  frame_number, fps < 9.95, fps, q);
1709  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1710  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1711  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1712  ost->file_index, ost->index, q);
1713  if (is_last_report)
1714  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: print a compact per-QP histogram as hex digits. */
1715  if (qp_hist) {
1716  int j;
1717  int qp = lrintf(q);
1718  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1719  qp_histogram[qp]++;
1720  for (j = 0; j < 32; j++)
1721  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1722  }
1723 
/* Per-plane and combined PSNR when the encoder collects error stats. */
1724  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1725  int j;
1726  double error, error_sum = 0;
1727  double scale, scale_sum = 0;
1728  double p;
1729  char type[3] = { 'Y','U','V' };
1730  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1731  for (j = 0; j < 3; j++) {
1732  if (is_last_report) {
1733  error = enc->error[j];
1734  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1735  } else {
1736  error = ost->error[j];
1737  scale = enc->width * enc->height * 255.0 * 255.0;
1738  }
/* Chroma planes are subsampled, hence a quarter of the luma area. */
1739  if (j)
1740  scale /= 4;
1741  error_sum += error;
1742  scale_sum += scale;
1743  p = psnr(error / scale);
1744  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1745  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1746  ost->file_index, ost->index, type[j] | 32, p);
1747  }
1748  p = psnr(error_sum / scale_sum);
1749  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1750  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1751  ost->file_index, ost->index, p);
1752  }
1753  vid = 1;
1754  }
1755  /* compute min output value */
1757  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1758  ost->st->time_base, AV_TIME_BASE_Q));
1759  if (is_last_report)
1760  nb_frames_drop += ost->last_dropped;
1761  }
1762 
1763  secs = FFABS(pts) / AV_TIME_BASE;
1764  us = FFABS(pts) % AV_TIME_BASE;
1765  mins = secs / 60;
1766  secs %= 60;
1767  hours = mins / 60;
1768  mins %= 60;
1769 
1770  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1771  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1772 
1773  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774  "size=N/A time=");
1775  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1776  "size=%8.0fkB time=", total_size / 1024.0);
1777  if (pts < 0)
1778  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1779  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1780  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1781  (100 * us) / AV_TIME_BASE);
1782 
1783  if (bitrate < 0) {
1784  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1785  av_bprintf(&buf_script, "bitrate=N/A\n");
1786  }else{
1787  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1788  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1789  }
1790 
1791  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1792  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1793  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1794  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1795  hours, mins, secs, us);
1796 
1798  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1800  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1801  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1802 
1803  if (speed < 0) {
1804  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1805  av_bprintf(&buf_script, "speed=N/A\n");
1806  } else {
1807  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1808  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1809  }
1810 
/* '\r' keeps intermediate reports on one console line; '\n' finalizes. */
1811  if (print_stats || is_last_report) {
1812  const char end = is_last_report ? '\n' : '\r';
1813  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1814  fprintf(stderr, "%s %c", buf, end);
1815  } else
1816  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1817 
1818  fflush(stderr);
1819  }
1820 
1821  if (progress_avio) {
1822  av_bprintf(&buf_script, "progress=%s\n",
1823  is_last_report ? "end" : "continue");
1824  avio_write(progress_avio, buf_script.str,
1825  FFMIN(buf_script.len, buf_script.size - 1));
1826  avio_flush(progress_avio);
1827  av_bprint_finalize(&buf_script, NULL);
1828  if (is_last_report) {
1829  if ((ret = avio_closep(&progress_avio)) < 0)
1831  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1832  }
1833  }
1834 
1835  if (is_last_report)
1836  print_final_stats(total_size);
1837 }
1838 
/*
 * At end of input, drain every encoder: late-initialize streams that never
 * received data (giving their filters fake parameters from libavformat),
 * then send a NULL frame and mux the remaining packets until EOF.
 * NOTE(review): several interior lines were lost in extraction (original
 * 1857 log call header, 1879 condition before 'continue', 1906 the media
 * type guard, 1929, 1962 the vstats guard before do_video_stats) —
 * confirm against the original source.
 */
1839 static void flush_encoders(void)
1840 {
1841  int i, ret;
1842 
1843  for (i = 0; i < nb_output_streams; i++) {
1844  OutputStream *ost = output_streams[i];
1845  AVCodecContext *enc = ost->enc_ctx;
1846  OutputFile *of = output_files[ost->file_index];
1847 
1848  if (!ost->encoding_needed)
1849  continue;
1850 
1851  // Try to enable encoding with no input frames.
1852  // Maybe we should just let encoding fail instead.
1853  if (!ost->initialized) {
1854  FilterGraph *fg = ost->filter->graph;
1855  char error[1024] = "";
1856 
1858  "Finishing stream %d:%d without any data written to it.\n",
1859  ost->file_index, ost->st->index);
1860 
1861  if (ost->filter && !fg->graph) {
1862  int x;
1863  for (x = 0; x < fg->nb_inputs; x++) {
1864  InputFilter *ifilter = fg->inputs[x];
1865  if (ifilter->format < 0) {
1866  AVCodecParameters *par = ifilter->ist->st->codecpar;
1867  // We never got any input. Set a fake format, which will
1868  // come from libavformat.
1869  ifilter->format = par->format;
1870  ifilter->sample_rate = par->sample_rate;
1871  ifilter->channels = par->channels;
1872  ifilter->channel_layout = par->channel_layout;
1873  ifilter->width = par->width;
1874  ifilter->height = par->height;
1875  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1876  }
1877  }
1878 
1880  continue;
1881 
1882  ret = configure_filtergraph(fg);
1883  if (ret < 0) {
1884  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1885  exit_program(1);
1886  }
1887 
1888  finish_output_stream(ost);
1889  }
1890 
1891  ret = init_output_stream(ost, error, sizeof(error));
1892  if (ret < 0) {
1893  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1894  ost->file_index, ost->index, error);
1895  exit_program(1);
1896  }
1897  }
1898 
/* PCM-style audio encoders (frame_size <= 1) buffer nothing to flush. */
1899  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1900  continue;
1901 #if FF_API_LAVF_FMT_RAWPICTURE
1902  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1903  continue;
1904 #endif
1905 
1907  continue;
1908 
1909  for (;;) {
1910  const char *desc = NULL;
1911  AVPacket pkt;
1912  int pkt_size;
1913 
1914  switch (enc->codec_type) {
1915  case AVMEDIA_TYPE_AUDIO:
1916  desc = "audio";
1917  break;
1918  case AVMEDIA_TYPE_VIDEO:
1919  desc = "video";
1920  break;
1921  default:
1922  av_assert0(0);
1923  }
1924 
1925  av_init_packet(&pkt);
1926  pkt.data = NULL;
1927  pkt.size = 0;
1928 
1930 
/* Enter draining mode: keep sending NULL frames while the encoder still
 * reports EAGAIN, then collect packets until AVERROR_EOF. */
1931  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1932  ret = avcodec_send_frame(enc, NULL);
1933  if (ret < 0) {
1934  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1935  desc,
1936  av_err2str(ret));
1937  exit_program(1);
1938  }
1939  }
1940 
1941  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1942  if (ret < 0 && ret != AVERROR_EOF) {
1943  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1944  desc,
1945  av_err2str(ret));
1946  exit_program(1);
1947  }
1948  if (ost->logfile && enc->stats_out) {
1949  fprintf(ost->logfile, "%s", enc->stats_out);
1950  }
/* EOF: flush the muxer with an empty packet and stop for this stream. */
1951  if (ret == AVERROR_EOF) {
1952  output_packet(of, &pkt, ost, 1);
1953  break;
1954  }
1955  if (ost->finished & MUXER_FINISHED) {
1956  av_packet_unref(&pkt);
1957  continue;
1958  }
1959  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1960  pkt_size = pkt.size;
1961  output_packet(of, &pkt, ost, 0);
1963  do_video_stats(ost, pkt_size);
1964  }
1965  }
1966  }
1967 }
1968 
1969 /*
1970  * Check whether a packet from ist should be written into ost at this time
1971  */
/*
 * Body of check_output_constraints(): decides whether a packet from the
 * given input stream should be written into the given output stream now.
 * Returns 1 to write, 0 to skip (wrong source stream, stream already
 * finished, or packet before the output file's start time).
 * NOTE(review): the signature line (original 1972) was lost in extraction;
 * the body reads parameters named ist and ost — confirm against the
 * original source.
 */
1973 {
1974  OutputFile *of = output_files[ost->file_index];
1975  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1976 
/* Only the mapped source stream feeds this output. */
1977  if (ost->source_index != ist_index)
1978  return 0;
1979 
1980  if (ost->finished)
1981  return 0;
1982 
/* Drop data before -ss of the output file. */
1983  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1984  return 0;
1985 
1986  return 1;
1987 }
1988 
/*
 * Stream-copy path: rebase an input packet's timestamps into the output
 * muxer timebase and write it without re-encoding. Handles start-time
 * offsets, recording-time limits, audio dts smoothing, and (legacy) the
 * AVFMT_RAWPICTURE packet layout.
 * NOTE(review): several interior lines were lost in extraction (original
 * 2001 the copy_initial_nonkeyframes half of the first condition, 2046 the
 * audio duration initializer, 2059-2060 additional codec-id exclusions,
 * 2085 part of the rawpicture condition) — confirm against the original.
 */
1989 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1990 {
1991  OutputFile *of = output_files[ost->file_index];
1992  InputFile *f = input_files [ist->file_index];
1993  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1994  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1995  AVPicture pict;
1996  AVPacket opkt;
1997 
1998  av_init_packet(&opkt);
1999 
/* Don't start a copied stream on a non-keyframe. */
2000  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2002  return;
2003 
/* Skip packets that precede the effective start point. */
2004  if (!ost->frame_number && !ost->copy_prior_start) {
2005  int64_t comp_start = start_time;
2006  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2007  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2008  if (pkt->pts == AV_NOPTS_VALUE ?
2009  ist->pts < comp_start :
2010  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2011  return;
2012  }
2013 
/* Output-file -t reached: finish this stream. */
2014  if (of->recording_time != INT64_MAX &&
2015  ist->pts >= of->recording_time + start_time) {
2016  close_output_stream(ost);
2017  return;
2018  }
2019 
/* Input-file -t reached: finish this stream. */
2020  if (f->recording_time != INT64_MAX) {
2021  start_time = f->ctx->start_time;
2022  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2023  start_time += f->start_time;
2024  if (ist->pts >= f->recording_time + start_time) {
2025  close_output_stream(ost);
2026  return;
2027  }
2028  }
2029 
2030  /* force the input stream PTS */
2031  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2032  ost->sync_opts++;
2033 
2034  if (pkt->pts != AV_NOPTS_VALUE)
2035  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036  else
2037  opkt.pts = AV_NOPTS_VALUE;
2038 
/* Fall back to the demuxer-estimated dts when the packet has none. */
2039  if (pkt->dts == AV_NOPTS_VALUE)
2040  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2041  else
2042  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2043  opkt.dts -= ost_tb_start_time;
2044 
/* Audio: rescale in sample units to avoid cumulative rounding drift. */
2045  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2047  if(!duration)
2048  duration = ist->dec_ctx->frame_size;
2049  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2050  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2051  ost->mux_timebase) - ost_tb_start_time;
2052  }
2053 
2054  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2055 
2056  opkt.flags = pkt->flags;
2057  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2058  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2061  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2062  ) {
2063  int ret = av_parser_change(ost->parser, ost->parser_avctx,
2064  &opkt.data, &opkt.size,
2065  pkt->data, pkt->size,
2066  pkt->flags & AV_PKT_FLAG_KEY);
2067  if (ret < 0) {
2068  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
2069  av_err2str(ret));
2070  exit_program(1);
2071  }
/* av_parser_change allocated new data: give opkt ownership of it. */
2072  if (ret) {
2073  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2074  if (!opkt.buf)
2075  exit_program(1);
2076  }
2077  } else {
2078  opkt.data = pkt->data;
2079  opkt.size = pkt->size;
2080  }
2081  av_copy_packet_side_data(&opkt, pkt);
2082 
2083 #if FF_API_LAVF_FMT_RAWPICTURE
2084  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2086  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2087  /* store AVPicture in AVPacket, as expected by the output format */
2088  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2089  if (ret < 0) {
2090  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2091  av_err2str(ret));
2092  exit_program(1);
2093  }
2094  opkt.data = (uint8_t *)&pict;
2095  opkt.size = sizeof(AVPicture);
2096  opkt.flags |= AV_PKT_FLAG_KEY;
2097  }
2098 #endif
2099 
2100  output_packet(of, &opkt, ost, 0);
2101 }
2102 
/*
 * Body of guess_input_channel_layout(ist): if the decoder reports no
 * channel layout, try to fill one in (bounded by -guess_layout_max) and
 * warn the user about the guess. Returns 1 when a layout is available
 * (already set or successfully guessed), 0 otherwise.
 * NOTE(review): the signature line (original 2103) and the statement that
 * performs the actual guess (original 2112) were lost in extraction —
 * confirm against the original source.
 */
2104 {
2105  AVCodecContext *dec = ist->dec_ctx;
2106 
2107  if (!dec->channel_layout) {
2108  char layout_name[256];
2109 
/* Respect the user-set upper bound on how many channels we may guess for. */
2110  if (dec->channels > ist->guess_layout_max)
2111  return 0;
2113  if (!dec->channel_layout)
2114  return 0;
2115  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2116  dec->channels, dec->channel_layout);
2117  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2118  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2119  }
2120  return 1;
2121 }
2122 
/*
 * Update the global decode success/error counters after a decode attempt
 * and, with -xerror, abort on any decode error or corrupt decoded frame.
 * ist may be NULL (the corrupt-frame check is skipped then).
 * NOTE(review): an inner condition (original line 2132, presumably testing
 * the decoded frame's corrupt/error flags) was lost in extraction —
 * confirm against the original source.
 */
2123 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2124 {
/* decode_error_stat[0] counts successes, [1] counts failures. */
2125  if (*got_output || ret<0)
2126  decode_error_stat[ret<0] ++;
2127 
2128  if (ret < 0 && exit_on_error)
2129  exit_program(1);
2130 
2131  if (exit_on_error && *got_output && ist) {
2133  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2134  exit_program(1);
2135  }
2136  }
2137 }
2138 
2139 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every audio/video input of the filtergraph has a known
 * format (format >= 0), 0 otherwise.
 * NOTE(review): the signature line (orig. 2140) is missing from this
 * extract; the parameter is the FilterGraph *fg used below. */
2141 {
2142  int i;
2143  for (i = 0; i < fg->nb_inputs; i++) {
2144  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2145  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2146  return 0;
2147  }
2148  return 1;
2149 }
2150 
/* Feed one decoded frame into a filtergraph input.
 * Detects input-parameter changes (format, audio layout, video size,
 * hw frames context) and reinitializes the graph when needed; frames that
 * arrive before all input formats are known are buffered in the input's
 * FIFO instead of being submitted.
 * NOTE(review): the signature line (orig. 2151) and one body line
 * (orig. 2221, the actual buffersrc submission whose result is checked
 * right after) are missing from this extract. */
2152 {
2153  FilterGraph *fg = ifilter->graph;
2154  int need_reinit, ret, i;
2155 
2156  /* determine if the parameters for this input changed */
2157  need_reinit = ifilter->format != frame->format;
2158 
2159  switch (ifilter->ist->st->codecpar->codec_type) {
2160  case AVMEDIA_TYPE_AUDIO:
2161  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2162  ifilter->channels != frame->channels ||
2163  ifilter->channel_layout != frame->channel_layout;
2164  break;
2165  case AVMEDIA_TYPE_VIDEO:
2166  need_reinit |= ifilter->width != frame->width ||
2167  ifilter->height != frame->height;
2168  break;
2169  }
2170 
2171  if (!ifilter->ist->reinit_filters && fg->graph)
2172  need_reinit = 0;
2173 
2174  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2175  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2176  need_reinit = 1;
2177 
2178  if (need_reinit) {
2179  ret = ifilter_parameters_from_frame(ifilter, frame);
2180  if (ret < 0)
2181  return ret;
2182  }
2183 
2184  /* (re)init the graph if possible, otherwise buffer the frame and return */
2185  if (need_reinit || !fg->graph) {
2186  for (i = 0; i < fg->nb_inputs; i++) {
2187  if (!ifilter_has_all_input_formats(fg)) {
2188  AVFrame *tmp = av_frame_clone(frame);
2189  if (!tmp)
2190  return AVERROR(ENOMEM);
2191  av_frame_unref(frame);
2192 
/* Grow the FIFO when full before queueing the cloned frame. */
2193  if (!av_fifo_space(ifilter->frame_queue)) {
2194  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2195  if (ret < 0) {
2196  av_frame_free(&tmp);
2197  return ret;
2198  }
2199  }
2200  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2201  return 0;
2202  }
2203  }
2204 
/* Drain the existing graph before reconfiguring it. */
2205  ret = reap_filters(1);
2206  if (ret < 0 && ret != AVERROR_EOF) {
2207  char errbuf[128];
2208  av_strerror(ret, errbuf, sizeof(errbuf));
2209 
2210  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2211  return ret;
2212  }
2213 
2214  ret = configure_filtergraph(fg);
2215  if (ret < 0) {
2216  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2217  return ret;
2218  }
2219  }
2220 
/* NOTE(review): orig. line 2221 missing here (extraction artifact);
 * it sets ret from the call that pushes the frame into the buffersrc. */
2222  if (ret < 0) {
2223  if (ret != AVERROR_EOF)
2224  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2225  return ret;
2226  }
2227 
2228  return 0;
2229 }
2230 
2231 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2232 {
2233  int i, j, ret;
2234 
2235  ifilter->eof = 1;
2236 
2237  if (ifilter->filter) {
2238  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2239  if (ret < 0)
2240  return ret;
2241  } else {
2242  // the filtergraph was never configured
2243  FilterGraph *fg = ifilter->graph;
2244  for (i = 0; i < fg->nb_inputs; i++)
2245  if (!fg->inputs[i]->eof)
2246  break;
2247  if (i == fg->nb_inputs) {
2248  // All the input streams have finished without the filtergraph
2249  // ever being configured.
2250  // Mark the output streams as finished.
2251  for (j = 0; j < fg->nb_outputs; j++)
2252  finish_output_stream(fg->outputs[j]->ost);
2253  }
2254  }
2255 
2256  return 0;
2257 }
2258 
2259 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2260 // There is the following difference: if you got a frame, you must call
2261 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2262 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2263 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2264 {
2265  int ret;
2266 
2267  *got_frame = 0;
2268 
2269  if (pkt) {
2270  ret = avcodec_send_packet(avctx, pkt);
2271  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2272  // decoded frames with avcodec_receive_frame() until done.
2273  if (ret < 0 && ret != AVERROR_EOF)
2274  return ret;
2275  }
2276 
2277  ret = avcodec_receive_frame(avctx, frame);
2278  if (ret < 0 && ret != AVERROR(EAGAIN))
2279  return ret;
2280  if (ret >= 0)
2281  *got_frame = 1;
2282 
2283  return 0;
2284 }
2285 
/* Distribute one decoded frame to every filtergraph input fed by this
 * input stream. The last filter receives the frame itself; earlier ones
 * get a reference copy via ist->filter_frame. Returns 0 on success or a
 * negative AVERROR (AVERROR_EOF from a filter is treated as success).
 * NOTE(review): orig. line 2304 is missing from this extract — it is the
 * start of the av_log() call whose continuation appears below. */
2286 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2287 {
2288  int i, ret;
2289  AVFrame *f;
2290 
2291  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2292  for (i = 0; i < ist->nb_filters; i++) {
2293  if (i < ist->nb_filters - 1) {
2294  f = ist->filter_frame;
2295  ret = av_frame_ref(f, decoded_frame);
2296  if (ret < 0)
2297  break;
2298  } else
2299  f = decoded_frame;
2300  ret = ifilter_send_frame(ist->filters[i], f);
2301  if (ret == AVERROR_EOF)
2302  ret = 0; /* ignore */
2303  if (ret < 0) {
/* NOTE(review): orig. line 2304 (error-log call opening) missing here. */
2305  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2306  break;
2307  }
2308  }
2309  return ret;
2310 }
2311 
/* Decode one audio packet (pkt == NULL drains the decoder), validate the
 * result, advance the stream's predicted next_pts/next_dts, rescale the
 * frame's pts into the decoder's sample-rate time base, and hand the frame
 * to the filter network. Sets *got_output / *decode_failed for the caller.
 * NOTE(review): orig. lines 2326 and 2371 are missing from this extract
 * (presumably benchmark bookkeeping before decode() and filter_frame
 * cleanup before the unref — confirm against the full source). */
2312 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2313  int *decode_failed)
2314 {
2315  AVFrame *decoded_frame;
2316  AVCodecContext *avctx = ist->dec_ctx;
2317  int ret, err = 0;
2318  AVRational decoded_frame_tb;
2319 
/* Lazily allocate the reusable frames owned by the input stream. */
2320  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2321  return AVERROR(ENOMEM);
2322  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2323  return AVERROR(ENOMEM);
2324  decoded_frame = ist->decoded_frame;
2325 
/* NOTE(review): orig. line 2326 missing here (extraction artifact). */
2327  ret = decode(avctx, decoded_frame, got_output, pkt);
2328  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2329  if (ret < 0)
2330  *decode_failed = 1;
2331 
2332  if (ret >= 0 && avctx->sample_rate <= 0) {
2333  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2334  ret = AVERROR_INVALIDDATA;
2335  }
2336 
2337  if (ret != AVERROR_EOF)
2338  check_decode_result(ist, got_output, ret);
2339 
2340  if (!*got_output || ret < 0)
2341  return ret;
2342 
2343  ist->samples_decoded += decoded_frame->nb_samples;
2344  ist->frames_decoded++;
2345 
2346 #if 1
2347  /* increment next_dts to use for the case where the input stream does not
2348  have timestamps or there are multiple frames in the packet */
2349  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2350  avctx->sample_rate;
2351  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2352  avctx->sample_rate;
2353 #endif
2354 
/* Pick a pts and its time base: frame pts, else packet pts, else ist->dts. */
2355  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2356  decoded_frame_tb = ist->st->time_base;
2357  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2358  decoded_frame->pts = pkt->pts;
2359  decoded_frame_tb = ist->st->time_base;
2360  }else {
2361  decoded_frame->pts = ist->dts;
2362  decoded_frame_tb = AV_TIME_BASE_Q;
2363  }
2364  if (decoded_frame->pts != AV_NOPTS_VALUE)
2365  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2366  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2367  (AVRational){1, avctx->sample_rate});
2368  ist->nb_samples = decoded_frame->nb_samples;
2369  err = send_frame_to_filters(ist, decoded_frame);
2370 
/* NOTE(review): orig. line 2371 missing here (extraction artifact). */
2372  av_frame_unref(decoded_frame);
2373  return err < 0 ? err : ret;
2374 }
2375 
/* Decode one video packet (or drain at EOF), fix up timestamps via
 * best_effort_timestamp (with a dts ring buffer used while draining),
 * retrieve hwaccel data when needed, and feed the frame to the filters.
 * Sets *got_output, *duration_pts and *decode_failed for the caller.
 * NOTE(review): orig. lines 2413, 2425 and 2504 are missing from this
 * extract (extraction artifact); gaps are flagged inline below. */
2376 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2377  int *decode_failed)
2378 {
2379  AVFrame *decoded_frame;
2380  int i, ret = 0, err = 0;
2381  int64_t best_effort_timestamp;
2382  int64_t dts = AV_NOPTS_VALUE;
2383  AVPacket avpkt;
2384 
2385  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2386  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2387  // skip the packet.
2388  if (!eof && pkt && pkt->size == 0)
2389  return 0;
2390 
2391  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2392  return AVERROR(ENOMEM);
2393  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2394  return AVERROR(ENOMEM);
2395  decoded_frame = ist->decoded_frame;
2396  if (ist->dts != AV_NOPTS_VALUE)
2397  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2398  if (pkt) {
2399  avpkt = *pkt;
2400  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2401  }
2402 
2403  // The old code used to set dts on the drain packet, which does not work
2404  // with the new API anymore.
2405  if (eof) {
2406  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2407  if (!new)
2408  return AVERROR(ENOMEM);
2409  ist->dts_buffer = new;
2410  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2411  }
2412 
/* NOTE(review): orig. line 2413 missing here (extraction artifact). */
2414  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2415  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2416  if (ret < 0)
2417  *decode_failed = 1;
2418 
2419  // The following line may be required in some cases where there is no parser
2420  // or the parser does not has_b_frames correctly
2421  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2422  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2423  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2424  } else
/* NOTE(review): orig. line 2425 (warning-log call opening) missing here. */
2426  "video_delay is larger in decoder than demuxer %d > %d.\n"
2427  "If you want to help, upload a sample "
2428  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2429  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2430  ist->dec_ctx->has_b_frames,
2431  ist->st->codecpar->video_delay);
2432  }
2433 
2434  if (ret != AVERROR_EOF)
2435  check_decode_result(ist, got_output, ret);
2436 
/* Debug aid: report decoder-context vs. frame parameter mismatches. */
2437  if (*got_output && ret >= 0) {
2438  if (ist->dec_ctx->width != decoded_frame->width ||
2439  ist->dec_ctx->height != decoded_frame->height ||
2440  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2441  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2442  decoded_frame->width,
2443  decoded_frame->height,
2444  decoded_frame->format,
2445  ist->dec_ctx->width,
2446  ist->dec_ctx->height,
2447  ist->dec_ctx->pix_fmt);
2448  }
2449  }
2450 
2451  if (!*got_output || ret < 0)
2452  return ret;
2453 
2454  if(ist->top_field_first>=0)
2455  decoded_frame->top_field_first = ist->top_field_first;
2456 
2457  ist->frames_decoded++;
2458 
2459  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2460  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2461  if (err < 0)
2462  goto fail;
2463  }
2464  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2465 
2466  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2467  *duration_pts = decoded_frame->pkt_duration;
2468 
2469  if (ist->framerate.num)
2470  best_effort_timestamp = ist->cfr_next_pts++;
2471 
/* While draining with no timestamp, fall back to the buffered dts queue. */
2472  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2473  best_effort_timestamp = ist->dts_buffer[0];
2474 
2475  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2476  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2477  ist->nb_dts_buffer--;
2478  }
2479 
2480  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2481  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2482 
2483  if (ts != AV_NOPTS_VALUE)
2484  ist->next_pts = ist->pts = ts;
2485  }
2486 
2487  if (debug_ts) {
2488  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2489  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2490  ist->st->index, av_ts2str(decoded_frame->pts),
2491  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2492  best_effort_timestamp,
2493  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2494  decoded_frame->key_frame, decoded_frame->pict_type,
2495  ist->st->time_base.num, ist->st->time_base.den);
2496  }
2497 
2498  if (ist->st->sample_aspect_ratio.num)
2499  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2500 
2501  err = send_frame_to_filters(ist, decoded_frame);
2502 
2503 fail:
/* NOTE(review): orig. line 2504 missing here (extraction artifact). */
2505  av_frame_unref(decoded_frame);
2506  return err < 0 ? err : ret;
2507 }
2508 
/* Decode one subtitle packet, optionally clamp the previous subtitle's
 * duration (-fix_sub_duration), route the subtitle either into sub2video
 * (as a video frame or via a queued FIFO) or directly to each subtitle
 * encoder that consumes this input stream.
 * NOTE(review): orig. lines 2534, 2536 and 2557 are missing from this
 * extract (extraction artifact); gaps are flagged inline below. */
2509 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2510  int *decode_failed)
2511 {
2512  AVSubtitle subtitle;
2513  int free_sub = 1;
2514  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2515  &subtitle, got_output, pkt);
2516 
2517  check_decode_result(NULL, got_output, ret);
2518 
2519  if (ret < 0 || !*got_output) {
2520  *decode_failed = 1;
2521  if (!pkt->size)
2522  sub2video_flush(ist);
2523  return ret;
2524  }
2525 
2526  if (ist->fix_sub_duration) {
2527  int end = 1;
2528  if (ist->prev_sub.got_output) {
2529  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2530  1000, AV_TIME_BASE);
2531  if (end < ist->prev_sub.subtitle.end_display_time) {
2532  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2533  "Subtitle duration reduced from %"PRId32" to %d%s\n",
/* NOTE(review): orig. line 2534 (log arguments) missing here. */
2535  end <= 0 ? ", dropping it" : "");
/* NOTE(review): orig. line 2536 (applying the clamped end time) missing. */
2537  }
2538  }
/* Swap current and previous results: output is delayed by one subtitle. */
2539  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2540  FFSWAP(int, ret, ist->prev_sub.ret);
2541  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2542  if (end <= 0)
2543  goto out;
2544  }
2545 
2546  if (!*got_output)
2547  return ret;
2548 
2549  if (ist->sub2video.frame) {
2550  sub2video_update(ist, &subtitle);
2551  } else if (ist->nb_filters) {
2552  if (!ist->sub2video.sub_queue)
2553  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2554  if (!ist->sub2video.sub_queue)
2555  exit_program(1);
2556  if (!av_fifo_space(ist->sub2video.sub_queue)) {
/* NOTE(review): orig. line 2557 (FIFO grow call) missing here. */
2558  if (ret < 0)
2559  exit_program(1);
2560  }
2561  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2562  free_sub = 0;
2563  }
2564 
2565  if (!subtitle.num_rects)
2566  goto out;
2567 
2568  ist->frames_decoded++;
2569 
2570  for (i = 0; i < nb_output_streams; i++) {
2571  OutputStream *ost = output_streams[i];
2572 
2573  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2574  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2575  continue;
2576 
2577  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2578  }
2579 
2580 out:
2581  if (free_sub)
2582  avsubtitle_free(&subtitle);
2583  return ret;
2584 }
2585 
/* Propagate EOF (at the stream's last pts, rescaled into stream time base)
 * to every filtergraph input fed by this input stream.
 * NOTE(review): the signature line (orig. 2586, taking the InputStream
 * *ist) and the rounding-flags argument line (orig. 2591) of the
 * av_rescale_q_rnd() call are missing from this extract. */
2587 {
2588  int i, ret;
2589  /* TODO keep pts also in stream time base to avoid converting back */
2590  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
/* NOTE(review): orig. line 2591 (rounding flags) missing here. */
2592 
2593  for (i = 0; i < ist->nb_filters; i++) {
2594  ret = ifilter_send_eof(ist->filters[i], pts);
2595  if (ret < 0)
2596  return ret;
2597  }
2598  return 0;
2599 }
2600 
2601 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Core per-packet driver for one input stream: maintains the stream's
 * pts/dts estimates, runs the decode loop (audio/video/subtitle) until the
 * decoder is drained, sends filter EOFs when the stream truly ends, and
 * performs timestamp bookkeeping plus do_streamcopy() for copy outputs.
 * Returns 0 once EOF was reached, non-zero while more input is expected.
 * NOTE(review): orig. lines 2661, 2664 and 2764 are missing from this
 * extract (extraction artifact); gaps are flagged inline below. */
2602 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2603 {
2604  int ret = 0, i;
2605  int repeating = 0;
2606  int eof_reached = 0;
2607 
2608  AVPacket avpkt;
2609  if (!ist->saw_first_ts) {
2610  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2611  ist->pts = 0;
2612  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2613  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2614  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2615  }
2616  ist->saw_first_ts = 1;
2617  }
2618 
2619  if (ist->next_dts == AV_NOPTS_VALUE)
2620  ist->next_dts = ist->dts;
2621  if (ist->next_pts == AV_NOPTS_VALUE)
2622  ist->next_pts = ist->pts;
2623 
2624  if (!pkt) {
2625  /* EOF handling */
2626  av_init_packet(&avpkt);
2627  avpkt.data = NULL;
2628  avpkt.size = 0;
2629  } else {
2630  avpkt = *pkt;
2631  }
2632 
2633  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2634  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2635  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2636  ist->next_pts = ist->pts = ist->dts;
2637  }
2638 
2639  // while we have more to decode or while the decoder did output something on EOF
2640  while (ist->decoding_needed) {
2641  int64_t duration_dts = 0;
2642  int64_t duration_pts = 0;
2643  int got_output = 0;
2644  int decode_failed = 0;
2645 
2646  ist->pts = ist->next_pts;
2647  ist->dts = ist->next_dts;
2648 
2649  switch (ist->dec_ctx->codec_type) {
2650  case AVMEDIA_TYPE_AUDIO:
2651  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2652  &decode_failed);
2653  break;
2654  case AVMEDIA_TYPE_VIDEO:
2655  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2656  &decode_failed);
2657  if (!repeating || !pkt || got_output) {
2658  if (pkt && pkt->duration) {
2659  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2660  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* NOTE(review): orig. line 2661 missing here — presumably computes the
 * "ticks" factor from the parser/ticks_per_frame, as in the stream-copy
 * branch further below. */
2662  duration_dts = ((int64_t)AV_TIME_BASE *
2663  ist->dec_ctx->framerate.den * ticks) /
/* NOTE(review): orig. line 2664 (divisor term of this expression) missing. */
2665  }
2666 
2667  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2668  ist->next_dts += duration_dts;
2669  }else
2670  ist->next_dts = AV_NOPTS_VALUE;
2671  }
2672 
2673  if (got_output) {
2674  if (duration_pts > 0) {
2675  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2676  } else {
2677  ist->next_pts += duration_dts;
2678  }
2679  }
2680  break;
2681  case AVMEDIA_TYPE_SUBTITLE:
2682  if (repeating)
2683  break;
2684  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2685  if (!pkt && ret >= 0)
2686  ret = AVERROR_EOF;
2687  break;
2688  default:
2689  return -1;
2690  }
2691 
2692  if (ret == AVERROR_EOF) {
2693  eof_reached = 1;
2694  break;
2695  }
2696 
2697  if (ret < 0) {
2698  if (decode_failed) {
2699  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2700  ist->file_index, ist->st->index, av_err2str(ret));
2701  } else {
2702  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2703  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2704  }
2705  if (!decode_failed || exit_on_error)
2706  exit_program(1);
2707  break;
2708  }
2709 
2710  if (got_output)
2711  ist->got_output = 1;
2712 
2713  if (!got_output)
2714  break;
2715 
2716  // During draining, we might get multiple output frames in this loop.
2717  // ffmpeg.c does not drain the filter chain on configuration changes,
2718  // which means if we send multiple frames at once to the filters, and
2719  // one of those frames changes configuration, the buffered frames will
2720  // be lost. This can upset certain FATE tests.
2721  // Decode only 1 frame per call on EOF to appease these FATE tests.
2722  // The ideal solution would be to rewrite decoding to use the new
2723  // decoding API in a better way.
2724  if (!pkt)
2725  break;
2726 
2727  repeating = 1;
2728  }
2729 
2730  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2731  /* except when looping we need to flush but not to send an EOF */
2732  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2733  int ret = send_filter_eof(ist);
2734  if (ret < 0) {
2735  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2736  exit_program(1);
2737  }
2738  }
2739 
2740  /* handle stream copy */
2741  if (!ist->decoding_needed) {
2742  ist->dts = ist->next_dts;
2743  switch (ist->dec_ctx->codec_type) {
2744  case AVMEDIA_TYPE_AUDIO:
2745  if (ist->dec_ctx->sample_rate) {
2746  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2747  ist->dec_ctx->sample_rate;
2748  } else {
2749  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2750  }
2751  break;
2752  case AVMEDIA_TYPE_VIDEO:
2753  if (ist->framerate.num) {
2754  // TODO: Remove work-around for c99-to-c89 issue 7
2755  AVRational time_base_q = AV_TIME_BASE_Q;
2756  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2757  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2758  } else if (pkt->duration) {
2759  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2760  } else if(ist->dec_ctx->framerate.num != 0) {
2761  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2762  ist->next_dts += ((int64_t)AV_TIME_BASE *
2763  ist->dec_ctx->framerate.den * ticks) /
/* NOTE(review): orig. line 2764 (divisor term of this expression) missing. */
2765  }
2766  break;
2767  }
2768  ist->pts = ist->dts;
2769  ist->next_pts = ist->next_dts;
2770  }
2771  for (i = 0; pkt && i < nb_output_streams; i++) {
2772  OutputStream *ost = output_streams[i];
2773 
2774  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2775  continue;
2776 
2777  do_streamcopy(ist, ost, pkt);
2778  }
2779 
2780  return !eof_reached;
2781 }
2782 
/* Build an SDP description covering every "rtp" output muxer and either
 * print it to stdout or write it to -sdp_file. Waits until all output
 * headers are written before doing anything.
 * NOTE(review): orig. line 2820 is missing from this extract (presumably
 * cleanup after writing the sdp file — confirm against the full source). */
2783 static void print_sdp(void)
2784 {
2785  char sdp[16384];
2786  int i;
2787  int j;
2788  AVIOContext *sdp_pb;
2789  AVFormatContext **avc;
2790 
/* Only emit the SDP once every output file's header has been written. */
2791  for (i = 0; i < nb_output_files; i++) {
2792  if (!output_files[i]->header_written)
2793  return;
2794  }
2795 
2796  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2797  if (!avc)
2798  exit_program(1);
2799  for (i = 0, j = 0; i < nb_output_files; i++) {
2800  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2801  avc[j] = output_files[i]->ctx;
2802  j++;
2803  }
2804  }
2805 
2806  if (!j)
2807  goto fail;
2808 
2809  av_sdp_create(avc, j, sdp, sizeof(sdp));
2810 
2811  if (!sdp_filename) {
2812  printf("SDP:\n%s\n", sdp);
2813  fflush(stdout);
2814  } else {
2815  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2816  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2817  } else {
2818  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2819  avio_closep(&sdp_pb);
/* NOTE(review): orig. line 2820 missing here (extraction artifact). */
2821  }
2822  }
2823 
2824 fail:
2825  av_freep(&avc);
2826 }
2827 
/* Look up the HWAccel table entry matching a pixel format, or NULL if the
 * format has no registered hwaccel.
 * NOTE(review): the signature line (orig. 2828) is missing from this
 * extract; the parameter is the pix_fmt compared below. */
2829 {
2830  int i;
2831  for (i = 0; hwaccels[i].name; i++)
2832  if (hwaccels[i].pix_fmt == pix_fmt)
2833  return &hwaccels[i];
2834  return NULL;
2835 }
2836 
/* AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * formats and pick the first hwaccel format whose hwaccel matches the
 * user's request and initializes successfully; fall back to the first
 * software format otherwise. Returns AV_PIX_FMT_NONE when an explicitly
 * requested hwaccel cannot be initialized.
 * NOTE(review): the signature line (orig. 2837) and orig. lines 2844,
 * 2859 and 2869 are missing from this extract; flagged inline below. */
2838 {
2839  InputStream *ist = s->opaque;
2840  const enum AVPixelFormat *p;
2841  int ret;
2842 
2843  for (p = pix_fmts; *p != -1; p++) {
/* NOTE(review): orig. line 2844 missing — presumably fetches the pixel
 * format descriptor whose flags are tested below. */
2845  const HWAccel *hwaccel;
2846 
/* Candidate list is ordered hw-first; the first non-hw format ends it. */
2847  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2848  break;
2849 
2850  hwaccel = get_hwaccel(*p);
2851  if (!hwaccel ||
2852  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2853  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2854  continue;
2855 
2856  ret = hwaccel->init(s);
2857  if (ret < 0) {
2858  if (ist->hwaccel_id == hwaccel->id) {
/* NOTE(review): orig. line 2859 (fatal-log call opening) missing here. */
2860  "%s hwaccel requested for input stream #%d:%d, "
2861  "but cannot be initialized.\n", hwaccel->name,
2862  ist->file_index, ist->st->index);
2863  return AV_PIX_FMT_NONE;
2864  }
2865  continue;
2866  }
2867 
2868  if (ist->hw_frames_ctx) {
/* NOTE(review): orig. line 2869 missing — presumably attaches a reference
 * to ist->hw_frames_ctx to the decoder context. */
2870  if (!s->hw_frames_ctx)
2871  return AV_PIX_FMT_NONE;
2872  }
2873 
2874  ist->active_hwaccel_id = hwaccel->id;
2875  ist->hwaccel_pix_fmt = *p;
2876  break;
2877  }
2878 
2879  return *p;
2880 }
2881 
/* AVCodecContext.get_buffer2 callback: delegate buffer allocation to the
 * active hwaccel for its pixel format, otherwise use the default
 * allocator.
 * NOTE(review): the signature line (orig. 2882) is missing from this
 * extract; parameters are the codec context s, frame and flags below. */
2883 {
2884  InputStream *ist = s->opaque;
2885 
2886  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2887  return ist->hwaccel_get_buffer(s, frame, flags);
2888 
2889  return avcodec_default_get_buffer2(s, frame, flags);
2890 }
2891 
/* Prepare one input stream for decoding: install the hwaccel callbacks,
 * set decoder options (refcounted frames, threads, subtitle quirks), set
 * up any hardware device, and open the decoder. On failure a message is
 * written into error/error_len and a negative AVERROR is returned.
 * NOTE(review): orig. lines 2914, 2922, 2927 and 2948 are missing from
 * this extract (extraction artifact); flagged inline below. */
2892 static int init_input_stream(int ist_index, char *error, int error_len)
2893 {
2894  int ret;
2895  InputStream *ist = input_streams[ist_index];
2896 
2897  if (ist->decoding_needed) {
2898  AVCodec *codec = ist->dec;
2899  if (!codec) {
2900  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2901  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2902  return AVERROR(EINVAL);
2903  }
2904 
2905  ist->dec_ctx->opaque = ist;
2906  ist->dec_ctx->get_format = get_format;
2907  ist->dec_ctx->get_buffer2 = get_buffer;
2908  ist->dec_ctx->thread_safe_callbacks = 1;
2909 
2910  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2911  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2912  (ist->decoding_needed & DECODING_FOR_OST)) {
2913  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
/* NOTE(review): orig. line 2914 missing — presumably the condition that
 * guards the warning below (decoding also needed for filtering). */
2915  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2916  }
2917 
2918  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2919 
2920  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2921  * audio, and video decoders such as cuvid or mediacodec */
/* NOTE(review): orig. line 2922 missing — presumably propagates the
 * stream time base to the decoder context (see comment above). */
2923 
2924  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2925  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2926  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
/* NOTE(review): orig. line 2927 missing — presumably the attached-pic
 * condition guarding the single-thread override below. */
2928  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2929 
2930  ret = hw_device_setup_for_decode(ist);
2931  if (ret < 0) {
2932  snprintf(error, error_len, "Device setup failed for "
2933  "decoder on input stream #%d:%d : %s",
2934  ist->file_index, ist->st->index, av_err2str(ret));
2935  return ret;
2936  }
2937 
2938  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2939  if (ret == AVERROR_EXPERIMENTAL)
2940  abort_codec_experimental(codec, 0);
2941 
2942  snprintf(error, error_len,
2943  "Error while opening decoder for input stream "
2944  "#%d:%d : %s",
2945  ist->file_index, ist->st->index, av_err2str(ret));
2946  return ret;
2947  }
/* NOTE(review): orig. line 2948 missing here (extraction artifact). */
2949  }
2950 
2951  ist->next_pts = AV_NOPTS_VALUE;
2952  ist->next_dts = AV_NOPTS_VALUE;
2953 
2954  return 0;
2955 }
2956 
/* Return the InputStream an output stream copies/encodes from, or NULL
 * when the output has no source stream (e.g. pure filter output).
 * NOTE(review): the signature line (orig. 2957) is missing from this
 * extract; the parameter is the OutputStream *ost used below. */
2958 {
2959  if (ost->source_index >= 0)
2960  return input_streams[ost->source_index];
2961  return NULL;
2962 }
2963 
/* qsort()-style comparator for int64_t values: returns a negative value,
 * zero, or a positive value as *a is less than, equal to, or greater than
 * *b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    /* (lhs > rhs) - (lhs < rhs) is the literal expansion of FFDIFFSIGN();
     * it cannot overflow, unlike a plain subtraction. */
    return (lhs > rhs) - (lhs < rhs);
}
2968 
2969 /* open the muxer when all the streams are initialized */
/* Once every output stream of this file is initialized, write the muxer
 * header, emit the SDP if requested, and flush packets queued in each
 * stream's muxing FIFO. Returns 0 (also when still waiting on streams) or
 * a negative AVERROR from avformat_write_header().
 * NOTE(review): orig. line 2984 (error-log call opening) is missing from
 * this extract; flagged inline below. */
2970 static int check_init_output_file(OutputFile *of, int file_index)
2971 {
2972  int ret, i;
2973 
2974  for (i = 0; i < of->ctx->nb_streams; i++) {
2975  OutputStream *ost = output_streams[of->ost_index + i];
2976  if (!ost->initialized)
2977  return 0;
2978  }
2979 
2980  of->ctx->interrupt_callback = int_cb;
2981 
2982  ret = avformat_write_header(of->ctx, &of->opts);
2983  if (ret < 0) {
/* NOTE(review): orig. line 2984 missing here (extraction artifact). */
2985  "Could not write header for output file #%d "
2986  "(incorrect codec parameters ?): %s\n",
2987  file_index, av_err2str(ret));
2988  return ret;
2989  }
2990  //assert_avoptions(of->opts);
2991  of->header_written = 1;
2992 
2993  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2994 
2995  if (sdp_filename || want_sdp)
2996  print_sdp();
2997 
2998  /* flush the muxing queues */
2999  for (i = 0; i < of->ctx->nb_streams; i++) {
3000  OutputStream *ost = output_streams[of->ost_index + i];
3001 
3002  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3003  if (!av_fifo_size(ost->muxing_queue))
3004  ost->mux_timebase = ost->st->time_base;
3005 
3006  while (av_fifo_size(ost->muxing_queue)) {
3007  AVPacket pkt;
3008  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3009  write_packet(of, &pkt, ost, 1);
3010  }
3011  }
3012 
3013  return 0;
3014 }
3015 
/* Initialize the output stream's bitstream-filter chain: chain codec
 * parameters and time bases through each AVBSFContext, then copy the
 * final output parameters/time base back onto the stream.
 * NOTE(review): the signature line (orig. 3016) is missing from this
 * extract; the parameter is the OutputStream *ost used below. */
3017 {
3018  AVBSFContext *ctx;
3019  int i, ret;
3020 
3021  if (!ost->nb_bitstream_filters)
3022  return 0;
3023 
3024  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3025  ctx = ost->bsf_ctx[i];
3026 
/* Each filter's input parameters come from the previous filter's output
 * (or from the stream itself for the first filter). */
3027  ret = avcodec_parameters_copy(ctx->par_in,
3028  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3029  if (ret < 0)
3030  return ret;
3031 
3032  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3033 
3034  ret = av_bsf_init(ctx);
3035  if (ret < 0) {
3036  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3037  ost->bsf_ctx[i]->filter->name);
3038  return ret;
3039  }
3040  }
3041 
3042  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3043  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3044  if (ret < 0)
3045  return ret;
3046 
3047  ost->st->time_base = ctx->time_base_out;
3048 
3049  return 0;
3050 }
3051 
/* Set up an output stream in stream-copy mode: copy codec parameters,
 * codec tag, frame rate, time base, duration, disposition, side data and
 * aspect ratio from the source input stream, with per-codec-type fixups.
 * NOTE(review): several lines are missing from this extract (extraction
 * artifact): the signature (orig. 3052) and orig. lines 3068, 3072, 3092,
 * 3098, 3120, 3123, 3127 and 3145 — each flagged inline below. Do not
 * assume behavior at the gaps without consulting the full source. */
3053 {
3054  OutputFile *of = output_files[ost->file_index];
3055  InputStream *ist = get_input_stream(ost);
3056  AVCodecParameters *par_dst = ost->st->codecpar;
3057  AVCodecParameters *par_src = ost->ref_par;
3058  AVRational sar;
3059  int i, ret;
3060  uint32_t codec_tag = par_dst->codec_tag;
3061 
3062  av_assert0(ist && !ost->filter);
3063 
3064  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3065  if (ret >= 0)
3066  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3067  if (ret < 0) {
/* NOTE(review): orig. line 3068 (error-log call opening) missing here. */
3069  "Error setting up codec context options.\n");
3070  return ret;
3071  }
/* NOTE(review): orig. line 3072 missing — presumably fills par_src from
 * ost->enc_ctx, since par_src is read below without other assignment. */
3073 
3074  if (!codec_tag) {
3075  unsigned int codec_tag_tmp;
3076  if (!of->ctx->oformat->codec_tag ||
3077  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3078  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3079  codec_tag = par_src->codec_tag;
3080  }
3081 
3082  ret = avcodec_parameters_copy(par_dst, par_src);
3083  if (ret < 0)
3084  return ret;
3085 
3086  par_dst->codec_tag = codec_tag;
3087 
3088  if (!ost->frame_rate.num)
3089  ost->frame_rate = ist->framerate;
3090  ost->st->avg_frame_rate = ost->frame_rate;
3091 
/* NOTE(review): orig. line 3092 missing — the call whose result is
 * checked below (timing-info transfer to the output stream). */
3093  if (ret < 0)
3094  return ret;
3095 
3096  // copy timebase while removing common factors
3097  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
/* NOTE(review): orig. line 3098 (the fallback time-base assignment)
 * missing here. */
3099 
3100  // copy estimated duration as a hint to the muxer
3101  if (ost->st->duration <= 0 && ist->st->duration > 0)
3102  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3103 
3104  // copy disposition
3105  ost->st->disposition = ist->st->disposition;
3106 
3107  if (ist->st->nb_side_data) {
3108  for (i = 0; i < ist->st->nb_side_data; i++) {
3109  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3110  uint8_t *dst_data;
3111 
3112  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3113  if (!dst_data)
3114  return AVERROR(ENOMEM);
3115  memcpy(dst_data, sd_src->data, sd_src->size);
3116  }
3117  }
3118 
3119  if (ost->rotate_overridden) {
/* NOTE(review): orig. line 3120 missing — the side-data allocation whose
 * size argument continues on the next line. */
3121  sizeof(int32_t) * 9);
3122  if (sd)
/* NOTE(review): orig. line 3123 missing — presumably writes the display
 * matrix for the overridden rotation into sd. */
3124  }
3125 
3126  ost->parser = av_parser_init(par_dst->codec_id);
/* NOTE(review): orig. line 3127 missing — the allocation of
 * ost->parser_avctx checked just below. */
3128  if (!ost->parser_avctx)
3129  return AVERROR(ENOMEM);
3130 
3131  switch (par_dst->codec_type) {
3132  case AVMEDIA_TYPE_AUDIO:
3133  if (audio_volume != 256) {
3134  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3135  exit_program(1);
3136  }
/* MP3/AC-3: clear bogus block_align values that break some muxers. */
3137  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3138  par_dst->block_align= 0;
3139  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3140  par_dst->block_align= 0;
3141  break;
3142  case AVMEDIA_TYPE_VIDEO:
3143  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3144  sar =
/* NOTE(review): orig. line 3145 missing — the start of the expression
 * computing sar from the overridden display aspect ratio. */
3146  (AVRational){ par_dst->height, par_dst->width });
3147  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3148  "with stream copy may produce invalid files\n");
3149  }
3150  else if (ist->st->sample_aspect_ratio.num)
3151  sar = ist->st->sample_aspect_ratio;
3152  else
3153  sar = par_src->sample_aspect_ratio;
3154  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3155  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3156  ost->st->r_frame_rate = ist->st->r_frame_rate;
3157  break;
3158  }
3159 
3160  ost->mux_timebase = ist->st->time_base;
3161 
3162  return 0;
3163 }
3164 
/* Set the "encoder" metadata tag on the output stream to the libavcodec
 * ident plus the encoder name, unless the user already set one or
 * bitexact mode is requested (then only "Lavc <name>" is written).
 * NOTE(review): the signature line (orig. 3165, taking the OutputFile *of
 * and OutputStream *ost) and orig. line 3203 (the final av_dict_set flags
 * argument) are missing from this extract. */
3166 {
3167  AVDictionaryEntry *e;
3168 
3169  uint8_t *encoder_string;
3170  int encoder_string_len;
3171  int format_flags = 0;
3172  int codec_flags = 0;
3173 
3174  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3175  return;
3176 
/* Evaluate -fflags / codec flags to detect bitexact mode. */
3177  e = av_dict_get(of->opts, "fflags", NULL, 0);
3178  if (e) {
3179  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3180  if (!o)
3181  return;
3182  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3183  }
3184  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3185  if (e) {
3186  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3187  if (!o)
3188  return;
3189  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3190  }
3191 
3192  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3193  encoder_string = av_mallocz(encoder_string_len);
3194  if (!encoder_string)
3195  exit_program(1);
3196 
3197  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3198  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3199  else
3200  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3201  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3202  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
/* NOTE(review): orig. line 3203 (av_dict_set flags argument) missing. */
3204 }
3205 
/* Parse a comma-separated -force_key_frames list into a sorted array of
 * timestamps stored in ost->forced_kf_pts (in avctx->time_base units).
 * An entry "chapters[+offset]" expands to one keyframe per chapter of the
 * output file. Exits the program on allocation failure.
 * NOTE(review): one av_log() call head (inside the realloc-failure branch)
 * is elided in this extract. */
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* Count entries: commas + 1. */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        /* Terminate the current entry in place. */
        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* Grow the array by one slot per chapter (minus the "chapters"
             * entry itself), guarding against int overflow. */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* Optional "+offset" after "chapters". */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts = pts;
}
3268 
3269 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3270 {
3271  InputStream *ist = get_input_stream(ost);
3272  AVCodecContext *enc_ctx = ost->enc_ctx;
3273  AVFormatContext *oc;
3274 
3275  if (ost->enc_timebase.num > 0) {
3276  enc_ctx->time_base = ost->enc_timebase;
3277  return;
3278  }
3279 
3280  if (ost->enc_timebase.num < 0) {
3281  if (ist) {
3282  enc_ctx->time_base = ist->st->time_base;
3283  return;
3284  }
3285 
3286  oc = output_files[ost->file_index]->ctx;
3287  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3288  }
3289 
3290  enc_ctx->time_base = default_time_base;
3291 }
3292 
/* Fill the encoder context of an output stream that will be transcoded:
 * framerate selection for video, per-media-type parameters (precision,
 * dimensions, pixel format, timebase, ...) and forced-keyframe handling.
 * NOTE(review): this extract is missing a number of lines (the function
 * signature, the dec_ctx declaration and several assignments probing the
 * filtergraph sink), so the code below is not the complete body. */
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* Inherit disposition and chroma siting from the source stream. */
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* No input stream (filter-fed output): look for another stream of
         * the same type in this output file. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Framerate priority: user/-r value, input -framerate, input
         * r_frame_rate, and finally a 25 fps default with a warning. */
        if (!ost->frame_rate.num)
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }
//      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
        /* Snap to the nearest framerate the encoder supports, unless the
         * user forced the exact rate with -force_fps. */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* Never claim more raw-sample precision than the decoder delivered
         * or the sample format can hold. */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
        if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        /* Forced keyframe timestamps were collected in another timebase;
         * rescale them to the encoder timebase. */
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 enc_ctx->time_base);

        /* Geometry, SAR and pixel format come from the filtergraph sink;
         * -aspect overrides the sample aspect ratio. */
        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        /* Geometry or format differs from the decoder (or there is no
         * decoder). NOTE(review): the branch body is elided here. */
        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
        }

        if (ost->forced_keyframes) {
            /* "expr:" prefix selects expression-based keyframe forcing. */
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                if (ret < 0) {
                        "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* Subtitle canvas defaults to the source video dimensions. */
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3444 
/* Finish setting up a single output stream: open its encoder (or do the
 * streamcopy setup), propagate side data and disposition, initialize the
 * bitstream filters and, once every stream of the file is ready, write
 * the output file header.
 * NOTE(review): several lines of this function are elided in this extract
 * (allocations, hw-frames wiring, avcodec_parameters_from/to_context and
 * some av_log call heads). */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        /* Pass the decoder's subtitle header (e.g. ASS styles) to the
         * encoder. */
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Default audio bitrate of 128 kb/s when the encoder declares no
         * defaults and the user supplied none. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* Hardware frames from the filtergraph sink, otherwise regular
         * hw-device setup for the encoder. */
        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                         ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* Fixed-frame-size audio encoders need the buffersink to deliver
         * exactly frame_size samples per frame. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        if (ret < 0) {
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* Copy coded side data produced by the encoder onto the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* Rotation was applied by the autorotate filter, so reset
                 * the display matrix on the output. */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;

        /*
         * FIXME: will the codec context used by the parser during streamcopy
         * This should go away with the new parser API.
         */
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        /* Throwaway AVClass so av_opt_eval_flags() can parse the string. */
        static const AVClass class = {
            .class_name = "",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* Write the file header once every stream of this output is ready. */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3627 
/* Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the input file's initial stream discovery.
 * NOTE(review): the argument supplying the %s media-type string in the
 * av_log call is elided in this extract. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    /* Already warned about this (or a later) stream index. */
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3642 
/* One-time setup before the main transcode loop: resolve source indices
 * for filter-fed output streams, start the -re emulation clocks, open
 * decoders and (non-filter-fed) encoders, discard unused programs, write
 * headers for streamless outputs and print the stream mapping.
 * NOTE(review): one line near the end (marking initialization done) is
 * elided in this extract. */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* For outputs fed by a single-input filtergraph, record which input
     * stream ultimately feeds them (used for mapping display below). */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* On failure, close any encoders before bailing out. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* Keep the program if at least one of its streams is used. */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        /* Inputs feeding complex filtergraphs. */
        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                /* "native" marks FFmpeg's own implementation of the codec. */
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    return 0;
}
3814 
3815 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3816 static int need_output(void)
3817 {
3818  int i;
3819 
3820  for (i = 0; i < nb_output_streams; i++) {
3821  OutputStream *ost = output_streams[i];
3822  OutputFile *of = output_files[ost->file_index];
3823  AVFormatContext *os = output_files[ost->file_index]->ctx;
3824 
3825  if (ost->finished ||
3826  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3827  continue;
3828  if (ost->frame_number >= ost->max_frames) {
3829  int j;
3830  for (j = 0; j < of->ctx->nb_streams; j++)
3831  close_output_stream(output_streams[of->ost_index + j]);
3832  continue;
3833  }
3834 
3835  return 1;
3836  }
3837 
3838  return 0;
3839 }
3840 
/**
 * Select the output stream to process.
 *
 * @return selected output stream, or NULL if none available
 */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* Muxed DTS of the stream in AV_TIME_BASE units; INT64_MIN sorts
         * streams that have not produced anything yet to the front. */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");

        /* Streams not initialized yet (and still fed by input) are served
         * first so their setup can complete. */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        /* Otherwise pick the unfinished stream that is furthest behind;
         * an unavailable (EAGAIN) minimum yields NULL. */
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3870 
/* Enable (on != 0) or disable terminal echo on stdin; a no-op where
 * termios is unavailable or stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return; /* not a terminal, or the query failed: nothing to do */

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3882 
/* Poll the controlling terminal (at most every 100ms) for interactive
 * commands: quit, verbosity, QP histogram, packet dumping, filtergraph
 * commands, debug modes and help. Returns AVERROR_EXIT to request
 * termination, 0 otherwise.
 * NOTE(review): the function signature and two av_log call heads are
 * elided in this extract. */
{
    int i, ret, key;
    static int64_t last_time;
    /* A received SIGINT/SIGTERM also requests exit. */
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    /* 'h' cycles: off -> packet dump -> packet+hex dump -> off. */
    if (key == 'h'){
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
    }
    /* 'c'/'C': read a command line from the tty and send ('c') or queue
     * ('C') it to the matching filtergraph filters. */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    /* time < 0 means "now": send immediately. */
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles the codec debug bitmask; 'd' reads an explicit value. */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
#endif
                )) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* Apply the new debug mask to every decoder and encoder. */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
3999 
4000 #if HAVE_PTHREADS
/* Per-input-file reader thread: pulls packets with av_read_frame() and
 * pushes them into the file's thread message queue until EOF or error.
 * NOTE(review): the head of one av_log call is elided in this extract. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* Propagate EOF/error to the queue's consumer side. */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* Queue full in non-blocking mode: fall back to a blocking
             * send and warn the user about thread_queue_size. */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4041 
/* Drain and free each input file's thread message queue, then join its
 * reader thread. NOTE(review): one line between the queue check and the
 * drain loop is elided in this extract. */
static void free_input_threads(void)
{
    int i;

    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        AVPacket pkt;

        if (!f || !f->in_thread_queue)
            continue;
        /* Drop any packets still queued by the reader thread. */
        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
            av_packet_unref(&pkt);

        pthread_join(f->thread, NULL);
        f->joined = 1;
        av_thread_message_queue_free(&f->in_thread_queue);
    }
}
4061 
4062 static int init_input_threads(void)
4063 {
4064  int i, ret;
4065 
4066  if (nb_input_files == 1)
4067  return 0;
4068 
4069  for (i = 0; i < nb_input_files; i++) {
4070  InputFile *f = input_files[i];
4071 
4072  if (f->ctx->pb ? !f->ctx->pb->seekable :
4073  strcmp(f->ctx->iformat->name, "lavfi"))
4074  f->non_blocking = 1;
4075  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4076  f->thread_queue_size, sizeof(AVPacket));
4077  if (ret < 0)
4078  return ret;
4079 
4080  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4081  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4082  av_thread_message_queue_free(&f->in_thread_queue);
4083  return AVERROR(ret);
4084  }
4085  }
4086  return 0;
4087 }
4088 
/* Receive one queued packet from the input file's reader thread.
 * NOTE(review): the tail of the call (the flags argument selecting
 * non-blocking receive) is elided in this extract. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
}
4095 #endif
4096 
4098 {
4099  if (f->rate_emu) {
4100  int i;
4101  for (i = 0; i < f->nb_streams; i++) {
4102  InputStream *ist = input_streams[f->ist_index + i];
4103  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4104  int64_t now = av_gettime_relative() - ist->start;
4105  if (pts > now)
4106  return AVERROR(EAGAIN);
4107  }
4108  }
4109 
4110 #if HAVE_PTHREADS
4111  if (nb_input_files > 1)
4112  return get_input_packet_mt(f, pkt);
4113 #endif
4114  return av_read_frame(f->ctx, pkt);
4115 }
4116 
4117 static int got_eagain(void)
4118 {
4119  int i;
4120  for (i = 0; i < nb_output_streams; i++)
4121  if (output_streams[i]->unavailable)
4122  return 1;
4123  return 0;
4124 }
4125 
4126 static void reset_eagain(void)
4127 {
4128  int i;
4129  for (i = 0; i < nb_input_files; i++)
4130  input_files[i]->eagain = 0;
4131  for (i = 0; i < nb_output_streams; i++)
4132  output_streams[i]->unavailable = 0;
4133 }
4134 
4135 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4136 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4137  AVRational time_base)
4138 {
4139  int ret;
4140 
4141  if (!*duration) {
4142  *duration = tmp;
4143  return tmp_time_base;
4144  }
4145 
4146  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4147  if (ret < 0) {
4148  *duration = tmp;
4149  return tmp_time_base;
4150  }
4151 
4152  return time_base;
4153 }
4154 
/* Rewind an input file for -stream_loop: seek back to the start, flush
 * the decoders, estimate the file duration (including the length of the
 * last frame) and decrement the remaining loop count.
 * NOTE(review): the function signature line is elided in this extract. */
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        // flush decoders
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                /* Last audio frame length in stream timebase units. */
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else
                continue;
        } else {
            /* No audio: approximate one frame duration from framerate. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
            } else duration = 1;
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4216 
4217 /*
4218  * Return
4219  * - 0 -- one packet was read and processed
4220  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4221  * this function should be called again
4222  * - AVERROR_EOF -- this function should not be called again
4223  */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;
    int64_t duration;
    int64_t pkt_dts;

    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* on EOF/error with -stream_loop pending: rewind and try reading again */
    if (ret < 0 && ifile->loop) {
        if ((ret = seek_to_start(ifile, is)) < 0)
            return ret;
        ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }

        /* real EOF: flush each decoder once, then mark the file finished */
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        /* NOTE(review): the av_pkt_dump_log2(...) call line is missing from
         * this excerpt; only its trailing argument survives below. */
        is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    /* per-stream statistics used for the status line / -report */
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
        exit_program(1);
    }

    if (debug_ts) {
        /* NOTE(review): some argument lines of this av_log are missing
         * from this excerpt. */
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* undo a timestamp wrap-around on formats with < 64-bit timestamps */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* don't overwrite side data the packet already carries */
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the accumulated input timestamp offset, then -itsscale */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    /* inter-stream discontinuity detection.
     * NOTE(review): the pkt_dts assignment and part of this condition are
     * missing from this excerpt. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
               "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
               delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* when looping, shift timestamps by the total duration of previous passes */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    /* intra-stream discontinuity detection against the predicted next dts.
     * NOTE(review): part of this condition is missing from this excerpt. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !copy_ts) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                   "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* non-discontinuous format: drop wildly invalid timestamps instead */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        /* NOTE(review): one argument line of this av_log is missing from
         * this excerpt. */
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4467 
4468 /**
4469  * Perform a step of transcoding for the specified filter graph.
4470  *
4471  * @param[in] graph filter graph to consider
4472  * @param[out] best_ist input stream where a frame would allow to continue
4473  * @return 0 for success, <0 for error
4474  */
4475 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4476 {
4477  int i, ret;
4478  int nb_requests, nb_requests_max = 0;
4479  InputFilter *ifilter;
4480  InputStream *ist;
4481 
4482  *best_ist = NULL;
4483  ret = avfilter_graph_request_oldest(graph->graph);
4484  if (ret >= 0)
4485  return reap_filters(0);
4486 
4487  if (ret == AVERROR_EOF) {
4488  ret = reap_filters(1);
4489  for (i = 0; i < graph->nb_outputs; i++)
4490  close_output_stream(graph->outputs[i]->ost);
4491  return ret;
4492  }
4493  if (ret != AVERROR(EAGAIN))
4494  return ret;
4495 
4496  for (i = 0; i < graph->nb_inputs; i++) {
4497  ifilter = graph->inputs[i];
4498  ist = ifilter->ist;
4499  if (input_files[ist->file_index]->eagain ||
4500  input_files[ist->file_index]->eof_reached)
4501  continue;
4502  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4503  if (nb_requests > nb_requests_max) {
4504  nb_requests_max = nb_requests;
4505  *best_ist = ist;
4506  }
4507  }
4508 
4509  if (!*best_ist)
4510  for (i = 0; i < graph->nb_outputs; i++)
4511  graph->outputs[i]->ost->unavailable = 1;
4512 
4513  return 0;
4514 }
4515 
4516 /**
4517  * Run a single step of transcoding.
4518  *
4519  * @return 0 for success, <0 for error
4520  */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream  *ist = NULL;
    int ret;

    /* pick the output stream that is currently furthest behind */
    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    if (ost->filter && !ost->filter->graph->graph) {
        /* NOTE(review): the opening condition line of an inner if-block is
         * missing from this excerpt (hence the extra closing brace below). */
        ret = configure_filtergraph(ost->filter->graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }
    }

    if (ost->filter && ost->filter->graph->graph) {
        if (!ost->initialized) {
            char error[1024] = {0};
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* graph not configured yet: feed any input that hasn't produced output */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* streamcopy: read directly from the mapped input stream */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4592 
4593 /*
4594  * The following code is the main loop of the file converter
4595  */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_PTHREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* main loop: one transcode_step() per iteration until EOF, error, or 'q' */
    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            char errbuf[128];
            av_strerror(ret, errbuf, sizeof(errbuf));

            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            /* NOTE(review): the av_log(...) call line is missing from this
             * excerpt; only its message arguments survive below. */
               "Nothing was written into output file %d (%s), because "
               "at least one of its streams received no packets.\n",
               i, os->filename);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* NOTE(review): one or two cleanup lines appear to be missing from this
     * excerpt at this point. */

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_PTHREADS
    free_input_threads();
#endif

    /* free per-output-stream allocations on both success and failure paths */
    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        /* NOTE(review): the av_log(...) call line is missing
                         * from this excerpt. */
                            "Error closing logfile, loss of information possible: %s\n",
                            av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}
4739 
4740 
/* CPU user time consumed by this process, in microseconds.
 * Falls back to wall-clock time where no per-process counter exists. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return (usage.ru_utime.tv_sec * 1000000LL) + usage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    FILETIME ctime, etime, ktime, utime;

    GetProcessTimes(GetCurrentProcess(), &ctime, &etime, &ktime, &utime);
    /* FILETIME counts 100 ns units -> divide by 10 for microseconds */
    return ((int64_t) utime.dwHighDateTime << 32 | utime.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
4758 
/* Peak memory usage of this process in bytes, or 0 if unavailable. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is in kilobytes on Linux — hence the *1024 */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS mc;

    mc.cb = sizeof(mc);
    GetProcessMemoryInfo(GetCurrentProcess(), &mc, sizeof(mc));
    return mc.PeakPagefileUsage;
#else
    return 0;
#endif
}
4776 
/* No-op log callback: discards every message. Presumably installed to
 * silence libav* logging in daemon mode — confirm against the -d handling
 * in main(). */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4780 
int main(int argc, char **argv)
{
    int i, ret;
    int64_t ti;

    init_dynload();

    /* NOTE(review): a setup call line (cleanup registration) appears to be
     * missing from this excerpt here. */

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    parse_loglevel(argc, argv, options);

    /* undocumented daemon mode: "-d" as the first argument */
    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        argc--;
        argv++;
    }

    /* NOTE(review): the component-registration calls (device/filter/network)
     * are missing from this excerpt; only av_register_all() survives. */
#if CONFIG_AVDEVICE
#endif
    av_register_all();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

//     if (nb_input_files == 0) {
//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
//         exit_program(1);
//     }

    /* an SDP dump is only wanted when every output is RTP */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
    }
    /* NOTE(review): the argument lines of this av_log and the surrounding
     * error-rate check / final exit_program call are partly missing from
     * this excerpt. */
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
        exit_program(69);

    return main_return_code;
}
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1556
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:323
int nb_bitstream_filters
Definition: ffmpeg.h:469
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:938
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:119
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:2039
AVRational enc_timebase
Definition: ffmpeg.h:467
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
Definition: avcodec.h:3026
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:672
int got_output
Definition: ffmpeg.h:348
#define AV_DISPOSITION_METADATA
Definition: avformat.h:873
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1989
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1065
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2103
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:455
const struct AVCodec * codec
Definition: avcodec.h:1770
Definition: ffmpeg.h:432
AVRational framerate
Definition: avcodec.h:3460
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4233
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:829
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:953
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:376
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:161
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:567
void term_init(void)
Definition: ffmpeg.c:373
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:334
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5948
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:270
int nb_outputs
Definition: ffmpeg.h:299
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
int size
AVDictionary * swr_opts
Definition: ffmpeg.h:515
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:309
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2419
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3052
void term_exit(void)
Definition: ffmpeg.c:315
int stream_copy
Definition: ffmpeg.h:520
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1239
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:4094
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1605
#define atomic_store(object, desired)
Definition: stdatomic.h:85
AVOption.
Definition: opt.h:246
AVRational frame_rate
Definition: ffmpeg.h:484
int64_t * forced_kf_pts
Definition: ffmpeg.h:494
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:295
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3101
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:383
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:510
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:917
static int process_input(int file_index)
Definition: ffmpeg.c:4224
int exit_on_error
Definition: ffmpeg_opt.c:128
int64_t cfr_next_pts
Definition: ffmpeg.h:333
const char * fmt
Definition: avisynth_c.h:769
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3445
static atomic_int transcode_init_done
Definition: ffmpeg.c:323
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1826
#define LIBAVUTIL_VERSION_INT
Definition: version.h:86
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1699
#define AV_DICT_DONT_OVERWRITE
Don&#39;t overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:131
Memory buffer source API.
const char * desc
Definition: nvenc.c:60
void av_log_set_level(int level)
Set the log level.
Definition: log.c:391
AVRational framerate
Definition: ffmpeg.h:340
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:4228
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:430
int height
Definition: ffmpeg.h:254
AVCodecParserContext * parser
Definition: ffmpeg.h:536
static int64_t cur_time
Definition: ffserver.c:252
int64_t max_pts
Definition: ffmpeg.h:329
int decoding_needed
Definition: ffmpeg.h:307
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:4152
void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:232
const struct AVBitStreamFilter * filter
The bitstream filter this context is an instance of.
Definition: avcodec.h:5923
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:959
void av_codec_set_pkt_timebase(AVCodecContext *avctx, AVRational val)
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5914
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1647
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:488
int index
stream index in AVFormatContext
Definition: avformat.h:890
int size
Definition: avcodec.h:1680
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4759
int max_muxing_queue_size
Definition: ffmpeg.h:551
const char * b
Definition: vf_curves.c:113
static int nb_frames_dup
Definition: ffmpeg.c:132
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2957
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:217
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:661
#define AV_DISPOSITION_DUB
Definition: avformat.h:837
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2172
int eagain
Definition: ffmpeg.h:403
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1150
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3293
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:641
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:849
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:378
int quality
Definition: ffmpeg.h:549
unsigned num_rects
Definition: avcodec.h:4132
AVFrame * filter_frame
Definition: ffmpeg.h:314
static int transcode_init(void)
Definition: ffmpeg.c:3643
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2964
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2602
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2939
int do_benchmark_all
Definition: ffmpeg_opt.c:121
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:851
int last_dropped
Definition: ffmpeg.h:478
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:672
discard all
Definition: avcodec.h:830
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:999
int64_t input_ts_offset
Definition: ffmpeg.h:409
int do_hex_dump
Definition: ffmpeg_opt.c:122
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3164
int nb_input_streams
Definition: ffmpeg.c:145
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:755
const char * name
Definition: ffmpeg.h:75
intptr_t atomic_int
Definition: stdatomic.h:55
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1002
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3628
Picture data structure.
Definition: avcodec.h:4061
#define src
Definition: vp8dsp.c:254
uint64_t packets_written
Definition: ffmpeg.h:543
AVCodec.
Definition: avcodec.h:3739
#define VSYNC_VFR
Definition: ffmpeg.h:55
int nb_dts_buffer
Definition: ffmpeg.h:395
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:496
This struct describes the properties of an encoded stream.
Definition: avcodec.h:4144
int print_stats
Definition: ffmpeg_opt.c:130
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:135
float dts_error_threshold
Definition: ffmpeg_opt.c:113
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:568
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:290
uint64_t data_size
Definition: ffmpeg.h:541
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:459
AVBSFContext ** bsf_ctx
Definition: ffmpeg.h:470
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:841
struct FilterGraph * graph
Definition: ffmpeg.h:245
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1898
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2286
Undefined.
Definition: avutil.h:273
AVSubtitleRect ** rects
Definition: avcodec.h:4133
int encoding_needed
Definition: ffmpeg.h:454
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:646
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4777
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:538
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3883
Format I/O context.
Definition: avformat.h:1349
uint64_t samples_decoded
Definition: ffmpeg.h:392
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:244
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2376
#define AV_RL64
Definition: intreadwrite.h:173
unsigned int nb_stream_indexes
Definition: avformat.h:1281
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:77
int64_t cur_dts
Definition: avformat.h:1066
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:4096
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1027
uint64_t frames_decoded
Definition: ffmpeg.h:391
int header_written
Definition: ffmpeg.h:573
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:293
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
char * logfile_prefix
Definition: ffmpeg.h:505
static uint8_t * subtitle_out
Definition: ffmpeg.c:142
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:197
static int main_return_code
Definition: ffmpeg.c:325
static int64_t start_time
Definition: ffplay.c:327
int copy_initial_nonkeyframes
Definition: ffmpeg.h:530
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:130
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:3016
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2531
int64_t * dts_buffer
Definition: ffmpeg.h:394
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
Definition: avformat.h:543
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
Opaque data information usually continuous.
Definition: avutil.h:203
AVDictionary * sws_dict
Definition: ffmpeg.h:514
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int width
Video only.
Definition: avcodec.h:4218
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:206
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:219
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3397
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:678
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5549
int stdin_interaction
Definition: ffmpeg_opt.c:132
FILE * logfile
Definition: ffmpeg.h:506
AVDictionary * opts
Definition: ffmpeg.h:565
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:637
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1697
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1473
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
#define ECHO(name, type, min, max)
Definition: af_aecho.c:186
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2828
#define FF_API_DEBUG_MV
Definition: version.h:82
static int need_output(void)
Definition: ffmpeg.c:3816
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:395
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:294
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:1003
static double psnr(double d)
Definition: ffmpeg.c:1355
int do_benchmark
Definition: ffmpeg_opt.c:120
int audio_sync_method
Definition: ffmpeg_opt.c:116
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
Definition: cfhd.c:80
int shortest
Definition: ffmpeg.h:571
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1417
int64_t duration
Definition: movenc.c:63
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2421
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:407
static int64_t getutime(void)
Definition: ffmpeg.c:4741
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:113
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
const char * name
Definition: avcodec.h:5964
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static void finish(void)
Definition: movenc.c:344
int nb_streams
Definition: ffmpeg.h:416
uint8_t * data
Definition: avcodec.h:1679
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
enum AVMediaType type
Definition: ffmpeg.h:247
static void set_tty_echo(int on)
Definition: ffmpeg.c:3871
AVDictionary * resample_opts
Definition: ffmpeg.h:516
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3206
static int flags
Definition: log.c:57
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:267
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4155
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4867
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
int * formats
Definition: ffmpeg.h:284
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:147
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:408
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1424
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:324
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:835
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1360
uint8_t * data
Definition: avcodec.h:1623
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:348
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the &#39;-loglevel&#39; option in the command line args and apply it.
Definition: cmdutils.c:505
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:4097
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:556
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVCodec * dec
Definition: ffmpeg.h:312
AVBufferRef * av_buffersink_get_hw_frames_ctx(const AVFilterContext *ctx)
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1279
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2931
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:216
int top_field_first
Definition: ffmpeg.h:341
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1477
int nb_output_streams
Definition: ffmpeg.c:150
int file_index
Definition: ffmpeg.h:303
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:184
const OptionDef options[]
Definition: ffserver.c:3948
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2505
unsigned int * stream_index
Definition: avformat.h:1280
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
int wrap_correction_done
Definition: ffmpeg.h:324
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:326
uint64_t channel_layout
Audio only.
Definition: avcodec.h:4254
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:266
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:871
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1368
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:627
int64_t next_dts
Definition: ffmpeg.h:319
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1711
attribute_deprecated int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:226
attribute_deprecated int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Definition: avpicture.c:37
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:62
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:555
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:356
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1210
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2509
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5611
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3753
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:380
AVRational sample_aspect_ratio
Definition: ffmpeg.h:255
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3800
int rate_emu
Definition: ffmpeg.h:419
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2346
int width
Definition: frame.h:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:2083
int sample_rate
Definition: ffmpeg.h:257
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1405
static void reset_eagain(void)
Definition: ffmpeg.c:4126
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:43
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:381
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:681
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3295
FilterGraph ** filtergraphs
Definition: ffmpeg.c:154
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:464
AVFilterContext * filter
Definition: ffmpeg.h:243
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:488
#define atomic_load(object)
Definition: stdatomic.h:93
int64_t start
Definition: ffmpeg.h:316
int loop
Definition: ffmpeg.h:405
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:4095
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:389
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:464
int video_sync_method
Definition: ffmpeg_opt.c:117
int format
Definition: ffmpeg.h:252
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:135
#define VSYNC_VSCFR
Definition: ffmpeg.h:56
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
char * sdp_filename
Definition: ffmpeg_opt.c:109
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:479
Display matrix.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int video_delay
Video only.
Definition: avcodec.h:4247
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:713
const char * r
Definition: vf_curves.c:111
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:117
int capabilities
Codec capabilities.
Definition: avcodec.h:3758
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:130
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:263
unsigned int nb_programs
Definition: avformat.h:1506
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:557
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:4148
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1662
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1856
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:558
AVChapter ** chapters
Definition: avformat.h:1557
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:359
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5954
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1625
int av_log_get_level(void)
Get the current log level.
Definition: log.c:386
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
const char * name
Name of the codec implementation.
Definition: avcodec.h:3746
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:880
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:623
int eof
Definition: ffmpeg.h:263
int force_fps
Definition: ffmpeg.h:486
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:312
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:970
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1276
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:2335
int qp_hist
Definition: ffmpeg_opt.c:131
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:109
float frame_drop_threshold
Definition: ffmpeg_opt.c:118
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1081
int64_t error[4]
Definition: ffmpeg.h:560
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1685
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3121
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2574
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
uint32_t end_display_time
Definition: avcodec.h:4131
static int want_sdp
Definition: ffmpeg.c:137
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:4134
OutputFilter * filter
Definition: ffmpeg.h:508
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2123
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:379
AVRational frame_aspect_ratio
Definition: ffmpeg.h:491
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:840
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2231
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1594
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:848
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
AVRational mux_timebase
Definition: ffmpeg.h:466
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1405
AVDictionary * opts
Definition: movenc.c:50
AVCodecContext * parser_avctx
Definition: ffmpeg.h:537
int block_align
Audio only.
Definition: avcodec.h:4269
static int nb_frames_drop
Definition: ffmpeg.c:134
A bitmap, pict will be set.
Definition: avcodec.h:4076
int linesize[4]
Definition: avcodec.h:4112
int nb_output_files
Definition: ffmpeg.c:152
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:261
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:236
int channels
number of audio channels, only used for audio.
Definition: frame.h:506
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:485
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:284
static int transcode(void)
Definition: ffmpeg.c:4596
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:929
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:439
char filename[1024]
input or output filename
Definition: avformat.h:1425
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:528
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:3591
uint64_t * channel_layouts
Definition: ffmpeg.h:285
#define VSYNC_AUTO
Definition: ffmpeg.h:52
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:406
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:216
int saw_first_ts
Definition: ffmpeg.h:338
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
int abort_on_flags
Definition: ffmpeg_opt.c:129
This side data contains quality related information from the encoder.
Definition: avcodec.h:1497
Immediately push the frame to the output.
Definition: buffersrc.h:46
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that&#39;s been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:268
int width
picture width / height.
Definition: avcodec.h:1948
PVOID HANDLE
char * apad
Definition: ffmpeg.h:517
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3616
int64_t nb_samples
Definition: ffmpeg.h:335
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5960
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:255
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:499
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:176
int64_t duration
Definition: ffmpeg.h:406
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:427
const char * name
Definition: avformat.h:524
int width
Definition: ffmpeg.h:254
int32_t
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:241
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:908
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:892
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2312
int nb_filtergraphs
Definition: ffmpeg.c:155
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:88
int64_t last_ts
Definition: ffmpeg.h:412
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:324
int do_pkt_dump
Definition: ffmpeg_opt.c:123
int64_t max_frames
Definition: ffmpeg.h:475
#define AV_RL32
Definition: intreadwrite.h:146
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:307
int audio_channels_mapped
Definition: ffmpeg.h:503
int n
Definition: avisynth_c.h:684
AVDictionary * metadata
Definition: avformat.h:961
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5325
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1907
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:671
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:112
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:4111
static int got_eagain(void)
Definition: ffmpeg.c:4117
int inputs_done
Definition: ffmpeg.h:527
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:219
static void error(const char *err)
int vstats_version
Definition: ffmpeg_opt.c:137
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:492
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:859
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1341
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:135
AVCodecContext * enc
Definition: muxing.c:55
#define av_log2
Definition: intmath.h:83
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:850
int ret
Definition: ffmpeg.h:349
Keep a reference to the frame.
Definition: buffersrc.h:53
int audio_volume
Definition: ffmpeg_opt.c:115
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:889
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1726
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:486
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:274
InputFilter ** filters
Definition: ffmpeg.h:365
int fix_sub_duration
Definition: ffmpeg.h:346
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:646
#define VSYNC_DROP
Definition: ffmpeg.h:57
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2140
int64_t recording_time
Definition: ffmpeg.h:415
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4880
Definition: ffmpeg.h:74
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2543
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:74
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:76
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2970
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:836
AVStream * st
Definition: ffmpeg.h:304
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:173
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3165
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
int frame_size
Definition: mxfenc.c:1896
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:52
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:872
int ost_index
Definition: ffmpeg.h:566
struct InputStream * sync_ist
Definition: ffmpeg.h:458
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1447
enum AVMediaType codec_type
Definition: avcodec.h:1769
double ts_scale
Definition: ffmpeg.h:337
int unavailable
Definition: ffmpeg.h:519
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:481
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2892
enum AVCodecID codec_id
Definition: avcodec.h:1778
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:327
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1589
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:188
float max_error_rate
Definition: ffmpeg_opt.c:134
int sample_rate
samples per second
Definition: avcodec.h:2523
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
uint64_t frames_encoded
Definition: ffmpeg.h:545
AVIOContext * pb
I/O context.
Definition: avformat.h:1391
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AVFifoBuffer * muxing_queue
Definition: ffmpeg.h:554
int ist_index
Definition: ffmpeg.h:404
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:549
static int loop
Definition: ffplay.c:336
int debug
debug
Definition: avcodec.h:3003
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
static void print_sdp(void)
Definition: ffmpeg.c:2783
const char * graph_desc
Definition: ffmpeg.h:291
int guess_layout_max
Definition: ffmpeg.h:342
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:413
struct InputStream::@38 prev_sub
main external API structure.
Definition: avcodec.h:1761
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:618
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:357
uint8_t * data
The data buffer.
Definition: buffer.h:89
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:466
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:863
int * sample_rates
Definition: ffmpeg.h:286
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1191
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:289
const char * attachment_filename
Definition: ffmpeg.h:529
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1972
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:400
AVRational time_base
Definition: ffmpeg.h:408
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:632
AVCodecContext * enc_ctx
Definition: ffmpeg.h:472
void * buf
Definition: avisynth_c.h:690
AVFrame * decoded_frame
Definition: ffmpeg.h:313
int extradata_size
Definition: avcodec.h:1877
Perform non-blocking operation.
Definition: threadmessage.h:31
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:261
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
Replacements for frequently missing libm functions.
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4475
int nb_coded_side_data
Definition: avcodec.h:3592
int channels
Definition: ffmpeg.h:258
int * audio_channels_map
Definition: ffmpeg.h:502
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:53
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:374
int configure_filtergraph(FilterGraph *fg)
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2678
OutputStream ** output_streams
Definition: ffmpeg.c:149
int index
Definition: gxfenc.c:89
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:3015
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:450
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:440
double rotate_override_value
Definition: ffmpeg.h:489
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2364
static int current_time
Definition: ffmpeg.c:139
int64_t sync_opts
Definition: ffmpeg.h:459
char * vstats_filename
Definition: ffmpeg_opt.c:108
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:164
AVCodecContext * dec_ctx
Definition: ffmpeg.h:311
char * disposition
Definition: ffmpeg.h:532
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:121
int filtergraph_is_simple(FilterGraph *fg)
#define mid_pred
Definition: mathops.h:97
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:825
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1099
int av_buffersink_get_w(const AVFilterContext *ctx)
int nb_streams_warn
Definition: ffmpeg.h:418
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3269
AVDictionary * decoder_opts
Definition: ffmpeg.h:339
int autorotate
Definition: ffmpeg.h:344
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:711
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:653
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:466
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:133
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4318
#define u(width,...)
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
int64_t ts_offset
Definition: ffmpeg.h:411
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:293
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4521
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:511
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:497
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1932
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1724
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4136
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:703
AVFrame * filtered_frame
Definition: ffmpeg.h:476
int source_index
Definition: ffmpeg.h:452
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:283
static volatile int received_nb_signals
Definition: ffmpeg.c:322
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:379
int copy_prior_start
Definition: ffmpeg.h:531
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:505
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1842
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:663
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:366
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2837
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1434
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
uint8_t level
Definition: svq3.c:207
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:498
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:320
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2463
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:105
int forced_kf_count
Definition: ffmpeg.h:495
int64_t start
Definition: avformat.h:1309
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:946
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
OSTFinished finished
Definition: ffmpeg.h:518
char * forced_keyframes
Definition: ffmpeg.h:497
int sample_rate
Audio only.
Definition: avcodec.h:4262
uint64_t data_size
Definition: ffmpeg.h:387
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:73
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:322
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1051
struct FilterGraph * graph
Definition: ffmpeg.h:269
uint64_t limit_filesize
Definition: ffmpeg.h:569
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1530
AVIOContext * progress_avio
Definition: ffmpeg.c:140
int main(int argc, char **argv)
Definition: ffmpeg.c:4781
int reinit_filters
Definition: ffmpeg.h:368
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:478
AVCodecParameters * ref_par
Definition: ffmpeg.h:473
#define VSYNC_CFR
Definition: ffmpeg.h:54
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1073
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:936
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:491
AVStream * st
Definition: muxing.c:54
static AVCodecContext * dec_ctx
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:950
uint32_t start_display_time
Definition: avcodec.h:4130
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1055
uint64_t samples_encoded
Definition: ffmpeg.h:546
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1308
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:3221
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:948
char * key
Definition: dict.h:86
static FILE * vstats_file
Definition: ffmpeg.c:115
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:89
AVFrame * last_frame
Definition: ffmpeg.h:477
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:152
uint64_t channel_layout
Definition: ffmpeg.h:259
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1712
int copy_ts
Definition: ffmpeg_opt.c:124
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1036
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1361
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4355
AVFormatContext * ctx
Definition: ffmpeg.h:401
int pict_type
Definition: ffmpeg.h:557
AVSubtitle subtitle
Definition: ffmpeg.h:350
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:896
int eof_reached
Definition: ffmpeg.h:402
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
int forced_kf_index
Definition: ffmpeg.h:496
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:489
char * avfilter
Definition: ffmpeg.h:509
uint8_t * name
Definition: ffmpeg.h:246
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:353
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:112
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:727
int channels
number of audio channels
Definition: avcodec.h:2524
int top_field_first
Definition: ffmpeg.h:487
int av_buffersink_get_channels(const AVFilterContext *ctx)
OutputFilter ** outputs
Definition: ffmpeg.h:298
InputFile ** input_files
Definition: ffmpeg.c:146
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2882
void av_log_set_flags(int arg)
Definition: log.c:396
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:279
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2151
AVFormatContext * ctx
Definition: ffmpeg.h:564
#define lrint
Definition: tablegen.h:53
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:839
void show_usage(void)
Definition: ffmpeg_opt.c:3245
int channels
Audio only.
Definition: avcodec.h:4258
An instance of a filter.
Definition: avfilter.h:338
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:893
#define LIBAVCODEC_IDENT
Definition: version.h:42
char * hwaccel_device
Definition: ffmpeg.h:372
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1678
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * encoder_opts
Definition: ffmpeg.h:513
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1301
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:113
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:5185
int height
Definition: frame.h:259
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:296
#define av_freep(p)
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:382
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:664
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2263
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2586
#define FF_DEBUG_VIS_QP
Definition: avcodec.h:3025
OutputFile ** output_files
Definition: ffmpeg.c:151
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
AVCodecParameters * codecpar
Definition: avformat.h:1252
#define av_malloc_array(a, b)
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
static void flush_encoders(void)
Definition: ffmpeg.c:1839
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: avcodec.h:4156
int copy_tb
Definition: ffmpeg_opt.c:126
int64_t min_pts
Definition: ffmpeg.h:328
int initialized
Definition: ffmpeg.h:525
static volatile int received_sigterm
Definition: ffmpeg.c:321
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:305
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4097
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
int stream_index
Definition: avcodec.h:1681
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:926
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:371
int depth
Number of bits in the component.
Definition: pixdesc.h:58
enum AVSubtitleType type
Definition: avcodec.h:4114
int64_t first_pts
Definition: ffmpeg.h:462
int nb_inputs
Definition: ffmpeg.h:297
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:952
#define DECODING_FOR_OST
Definition: ffmpeg.h:308
int index
Definition: ffmpeg.h:451
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1108
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
OSTFinished
Definition: ffmpeg.h:444
This structure stores compressed data.
Definition: avcodec.h:1656
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:390
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1136
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5942
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:355
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:249
int debug_ts
Definition: ffmpeg_opt.c:127
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3846
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:267
static void sigterm_handler(int sig)
Definition: ffmpeg.c:328
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1672
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:122
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1818
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1507
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:838
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
InputStream ** input_streams
Definition: ffmpeg.c:144
static unsigned dup_warning
Definition: ffmpeg.c:133
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:69
Definition: ffmpeg.h:436
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:807
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3396
static uint8_t tmp[11]
Definition: aes_ctr.c:26