avconv.c
1 /*
2  * avconv main
3  * Copyright (c) 2000-2011 The libav developers.
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include "libavformat/avformat.h"
31 #include "libavdevice/avdevice.h"
32 #include "libswscale/swscale.h"
33 #include "libavresample/avresample.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/channel_layout.h"
36 #include "libavutil/parseutils.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/colorspace.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/intreadwrite.h"
41 #include "libavutil/dict.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/pixdesc.h"
44 #include "libavutil/avstring.h"
45 #include "libavutil/libm.h"
46 #include "libavutil/imgutils.h"
47 #include "libavutil/time.h"
48 #include "libavformat/os_support.h"
49 
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 
55 #if HAVE_SYS_RESOURCE_H
56 #include <sys/time.h>
57 #include <sys/types.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
60 #include <windows.h>
61 #endif
62 #if HAVE_GETPROCESSMEMORYINFO
63 #include <windows.h>
64 #include <psapi.h>
65 #endif
66 
67 #if HAVE_SYS_SELECT_H
68 #include <sys/select.h>
69 #endif
70 
71 #if HAVE_PTHREADS
72 #include <pthread.h>
73 #endif
74 
75 #include <time.h>
76 
77 #include "avconv.h"
78 #include "cmdutils.h"
79 
80 #include "libavutil/avassert.h"
81 
82 const char program_name[] = "avconv";
83 const int program_birth_year = 2000;
84 
85 static FILE *vstats_file;
86 
87 static int64_t video_size = 0;
88 static int64_t audio_size = 0;
89 static int64_t extra_size = 0;
90 static int nb_frames_dup = 0;
91 static int nb_frames_drop = 0;
92 
93 
94 
95 #if HAVE_PTHREADS
96 /* signal to input threads that they should exit; set by the main thread */
97 static int transcoding_finished;
98 #endif
99 
100 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
101 
102 InputStream **input_streams = NULL;
103 int        nb_input_streams = 0;
104 InputFile   **input_files   = NULL;
105 int        nb_input_files   = 0;
106 
107 OutputStream **output_streams = NULL;
108 int         nb_output_streams = 0;
109 OutputFile   **output_files   = NULL;
110 int         nb_output_files   = 0;
111 
112 FilterGraph **filtergraphs;
113 int        nb_filtergraphs;
114 
115 static void term_exit(void)
116 {
117  av_log(NULL, AV_LOG_QUIET, "");
118 }
119 
120 static volatile int received_sigterm = 0;
121 static volatile int received_nb_signals = 0;
122 
123 static void
124 sigterm_handler(int sig)
125 {
126  received_sigterm = sig;
127  received_nb_signals++;
128  term_exit();
129 }
130 
131 static void term_init(void)
132 {
133  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
134  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
135 #ifdef SIGXCPU
136  signal(SIGXCPU, sigterm_handler);
137 #endif
138 }
139 
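/* AVIO interrupt callback: once more than one termination signal has been
 * received, ask blocking I/O operations to abort so avconv can exit. */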
140 static int decode_interrupt_cb(void *ctx)
141 {
142  return received_nb_signals > 1;
143 }
144 
145 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
146 
147 static void exit_program(void)
148 {
149  int i, j;
150 
151  for (i = 0; i < nb_filtergraphs; i++) {
152  avfilter_graph_free(&filtergraphs[i]->graph);
153  for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
154  av_freep(&filtergraphs[i]->inputs[j]->name);
155  av_freep(&filtergraphs[i]->inputs[j]);
156  }
157  av_freep(&filtergraphs[i]->inputs);
158  for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
159  av_freep(&filtergraphs[i]->outputs[j]->name);
160  av_freep(&filtergraphs[i]->outputs[j]);
161  }
162  av_freep(&filtergraphs[i]->outputs);
163  av_freep(&filtergraphs[i]);
164  }
165  av_freep(&filtergraphs);
166 
167  /* close files */
168  for (i = 0; i < nb_output_files; i++) {
169  AVFormatContext *s = output_files[i]->ctx;
170  if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
171  avio_close(s->pb);
172  avformat_free_context(s);
173  av_dict_free(&output_files[i]->opts);
174  av_freep(&output_files[i]);
175  }
176  for (i = 0; i < nb_output_streams; i++) {
177  AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
178  while (bsfc) {
179  AVBitStreamFilterContext *next = bsfc->next;
180  av_bitstream_filter_close(bsfc);
181  bsfc = next;
182  }
183  output_streams[i]->bitstream_filters = NULL;
184  avcodec_free_frame(&output_streams[i]->filtered_frame);
185 
186  av_freep(&output_streams[i]->forced_keyframes);
187  av_freep(&output_streams[i]->avfilter);
188  av_freep(&output_streams[i]->logfile_prefix);
189  av_freep(&output_streams[i]);
190  }
191  for (i = 0; i < nb_input_files; i++) {
192  avformat_close_input(&input_files[i]->ctx);
193  av_freep(&input_files[i]);
194  }
195  for (i = 0; i < nb_input_streams; i++) {
196  avcodec_free_frame(&input_streams[i]->decoded_frame);
197  av_dict_free(&input_streams[i]->opts);
198  free_buffer_pool(&input_streams[i]->buffer_pool);
199  av_freep(&input_streams[i]->filters);
200  av_freep(&input_streams[i]);
201  }
202 
203  if (vstats_file)
204  fclose(vstats_file);
205  av_free(vstats_filename);
206 
207  av_freep(&input_streams);
208  av_freep(&input_files);
209  av_freep(&output_streams);
210  av_freep(&output_files);
211 
212  uninit_opts();
213 
214  avfilter_uninit();
215  avformat_network_deinit();
216 
217  if (received_sigterm) {
218  av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
219  (int) received_sigterm);
220  exit (255);
221  }
222 }
223 
224 static void assert_avoptions(AVDictionary *m)
225 {
226  AVDictionaryEntry *t;
227  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
228  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
229  exit(1);
230  }
231 }
232 
233 static void abort_codec_experimental(AVCodec *c, int encoder)
234 {
235  const char *codec_string = encoder ? "encoder" : "decoder";
236  AVCodec *codec;
237  av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
238  "results.\nAdd '-strict experimental' if you want to use it.\n",
239  codec_string, c->name);
240  codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
241  if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
242  av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
243  codec_string, codec->name);
244  exit(1);
245 }
246 
247 /*
248  * Update the requested input sample format based on the output sample format.
249  * This is currently only used to request float output from decoders which
250  * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
251  * Ideally this will be removed in the future when decoders do not do format
252  * conversion and only output in their native format.
253  */
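/* For example, when encoding to an encoder that wants AV_SAMPLE_FMT_FLT and
 * the decoder can output either S16 or FLT, requesting FLT here avoids an
 * extra sample-format conversion later in the chain. */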
254 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
255  AVCodecContext *enc)
256 {
257  /* if sample formats match or a decoder sample format has already been
258  requested, just return */
259  if (enc->sample_fmt == dec->sample_fmt ||
260  dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
261  return;
262 
263  /* if decoder supports more than one output format */
264  if (dec_codec && dec_codec->sample_fmts &&
265  dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
266  dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
267  const enum AVSampleFormat *p;
268  int min_dec = INT_MAX, min_inc = INT_MAX;
269  enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
270  enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
271 
272  /* find a matching sample format in the encoder */
273  for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
274  if (*p == enc->sample_fmt) {
275  dec->request_sample_fmt = *p;
276  return;
277  } else {
278  enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
279  enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
280  int fmt_diff = 32 * abs(dfmt - efmt);
281  if (av_sample_fmt_is_planar(*p) !=
282  av_sample_fmt_is_planar(enc->sample_fmt))
283  fmt_diff++;
284  if (dfmt == efmt) {
285  min_inc = fmt_diff;
286  inc_fmt = *p;
287  } else if (dfmt > efmt) {
288  if (fmt_diff < min_inc) {
289  min_inc = fmt_diff;
290  inc_fmt = *p;
291  }
292  } else {
293  if (fmt_diff < min_dec) {
294  min_dec = fmt_diff;
295  dec_fmt = *p;
296  }
297  }
298  }
299  }
300 
301  /* if none match, provide the one that matches quality closest */
302  dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
303  }
304 }
305 
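/* Run the packet through any registered bitstream filters, enforce
 * monotonically increasing DTS where the muxer requires it, and hand the
 * result to the muxer via av_interleaved_write_frame(). */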
306 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
307 {
308  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
309  AVCodecContext *avctx = ost->st->codec;
310  int ret;
311 
312  /*
313  * Audio encoders may split the packets -- #frames in != #packets out.
314  * But there is no reordering, so we can limit the number of output packets
315  * by simply dropping them here.
316  * Counting encoded video frames needs to be done separately because of
317  * reordering, see do_video_out()
318  */
319  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
320  if (ost->frame_number >= ost->max_frames) {
321  av_free_packet(pkt);
322  return;
323  }
324  ost->frame_number++;
325  }
326 
327  while (bsfc) {
328  AVPacket new_pkt = *pkt;
329  int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
330  &new_pkt.data, &new_pkt.size,
331  pkt->data, pkt->size,
332  pkt->flags & AV_PKT_FLAG_KEY);
333  if (a > 0) {
334  av_free_packet(pkt);
335  new_pkt.destruct = av_destruct_packet;
336  } else if (a < 0) {
337  av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
338  bsfc->filter->name, pkt->stream_index,
339  avctx->codec ? avctx->codec->name : "copy");
340  print_error("", a);
341  if (exit_on_error)
342  exit(1);
343  }
344  *pkt = new_pkt;
345 
346  bsfc = bsfc->next;
347  }
348 
349  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
350  ost->last_mux_dts != AV_NOPTS_VALUE &&
351  pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
352  av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
353  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
354  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
355  if (exit_on_error) {
356  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
357  exit(1);
358  }
359  av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
360  "in incorrect timestamps in the output file.\n",
361  ost->last_mux_dts + 1);
362  pkt->dts = ost->last_mux_dts + 1;
363  if (pkt->pts != AV_NOPTS_VALUE)
364  pkt->pts = FFMAX(pkt->pts, pkt->dts);
365  }
366  ost->last_mux_dts = pkt->dts;
367 
368  pkt->stream_index = ost->index;
369  ret = av_interleaved_write_frame(s, pkt);
370  if (ret < 0) {
371  print_error("av_interleaved_write_frame()", ret);
372  exit(1);
373  }
374 }
375 
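/* Return 0 and mark the stream as finished once the -t recording time of its
 * output file has been reached; return 1 if more output should be produced. */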
376 static int check_recording_time(OutputStream *ost)
377 {
378  OutputFile *of = output_files[ost->file_index];
379 
380  if (of->recording_time != INT64_MAX &&
381  av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
382  AV_TIME_BASE_Q) >= 0) {
383  ost->finished = 1;
384  return 0;
385  }
386  return 1;
387 }
388 
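/* Encode one filtered audio frame and mux the resulting packet, rescaling its
 * timestamps from the encoder time base to the stream time base. */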
389 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
390  AVFrame *frame)
391 {
392  AVCodecContext *enc = ost->st->codec;
393  AVPacket pkt;
394  int got_packet = 0;
395 
396  av_init_packet(&pkt);
397  pkt.data = NULL;
398  pkt.size = 0;
399 
400  if (!check_recording_time(ost))
401  return;
402 
403  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
404  frame->pts = ost->sync_opts;
405  ost->sync_opts = frame->pts + frame->nb_samples;
406 
407  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
408  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
409  exit(1);
410  }
411 
412  if (got_packet) {
413  if (pkt.pts != AV_NOPTS_VALUE)
414  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
415  if (pkt.dts != AV_NOPTS_VALUE)
416  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
417  if (pkt.duration > 0)
418  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
419 
420  write_frame(s, &pkt, ost);
421 
422  audio_size += pkt.size;
423  }
424 }
425 
426 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
427 {
428  AVCodecContext *dec;
429  AVPicture *picture2;
430  AVPicture picture_tmp;
431  uint8_t *buf = 0;
432 
433  dec = ist->st->codec;
434 
435  /* deinterlace : must be done before any resize */
436  if (do_deinterlace) {
437  int size;
438 
439  /* create temporary picture */
440  size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
441  buf = av_malloc(size);
442  if (!buf)
443  return;
444 
445  picture2 = &picture_tmp;
446  avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
447 
448  if (avpicture_deinterlace(picture2, picture,
449  dec->pix_fmt, dec->width, dec->height) < 0) {
450  /* if error, do not deinterlace */
451  av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
452  av_free(buf);
453  buf = NULL;
454  picture2 = picture;
455  }
456  } else {
457  picture2 = picture;
458  }
459 
460  if (picture != picture2)
461  *picture = *picture2;
462  *bufp = buf;
463 }
464 
465 static void do_subtitle_out(AVFormatContext *s,
466  OutputStream *ost,
467  InputStream *ist,
468  AVSubtitle *sub,
469  int64_t pts)
470 {
471  static uint8_t *subtitle_out = NULL;
472  int subtitle_out_max_size = 1024 * 1024;
473  int subtitle_out_size, nb, i;
474  AVCodecContext *enc;
475  AVPacket pkt;
476 
477  if (pts == AV_NOPTS_VALUE) {
478  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
479  if (exit_on_error)
480  exit(1);
481  return;
482  }
483 
484  enc = ost->st->codec;
485 
486  if (!subtitle_out) {
487  subtitle_out = av_malloc(subtitle_out_max_size);
488  }
489 
490  /* Note: DVB subtitles need one packet to draw them and another
491  packet to clear them */
492  /* XXX: signal it in the codec context ? */
493  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
494  nb = 2;
495  else
496  nb = 1;
497 
498  for (i = 0; i < nb; i++) {
499  ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
500  if (!check_recording_time(ost))
501  return;
502 
503  sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
504  // start_display_time is required to be 0
505  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
506  sub->end_display_time -= sub->start_display_time;
507  sub->start_display_time = 0;
508  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
509  subtitle_out_max_size, sub);
510  if (subtitle_out_size < 0) {
511  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
512  exit(1);
513  }
514 
515  av_init_packet(&pkt);
516  pkt.data = subtitle_out;
517  pkt.size = subtitle_out_size;
518  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
519  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
520  /* XXX: the pts correction is handled here. Maybe handling
521  it in the codec would be better */
522  if (i == 0)
523  pkt.pts += 90 * sub->start_display_time;
524  else
525  pkt.pts += 90 * sub->end_display_time;
526  }
527  write_frame(s, &pkt, ost);
528  }
529 }
530 
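/* Encode one filtered video frame: drop it if its pts would go backwards for
 * the selected vsync mode, pass raw pictures straight to the muxer for
 * AVFMT_RAWPICTURE formats, honour forced keyframes and write two-pass
 * statistics when requested. */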
531 static void do_video_out(AVFormatContext *s,
532  OutputStream *ost,
533  AVFrame *in_picture,
534  int *frame_size)
535 {
536  int ret, format_video_sync;
537  AVPacket pkt;
538  AVCodecContext *enc = ost->st->codec;
539 
540  *frame_size = 0;
541 
542  format_video_sync = video_sync_method;
543  if (format_video_sync == VSYNC_AUTO)
544  format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
545  (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
546  if (format_video_sync != VSYNC_PASSTHROUGH &&
547  ost->frame_number &&
548  in_picture->pts != AV_NOPTS_VALUE &&
549  in_picture->pts < ost->sync_opts) {
550  nb_frames_drop++;
551  av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
552  return;
553  }
554 
555  if (in_picture->pts == AV_NOPTS_VALUE)
556  in_picture->pts = ost->sync_opts;
557  ost->sync_opts = in_picture->pts;
558 
559 
560  if (!ost->frame_number)
561  ost->first_pts = in_picture->pts;
562 
563  av_init_packet(&pkt);
564  pkt.data = NULL;
565  pkt.size = 0;
566 
567  if (!check_recording_time(ost) ||
568  ost->frame_number >= ost->max_frames)
569  return;
570 
571  if (s->oformat->flags & AVFMT_RAWPICTURE &&
572  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
573  /* raw pictures are written as an AVPicture structure to
574  avoid any copies. We temporarily support the older
575  method. */
576  enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
577  enc->coded_frame->top_field_first = in_picture->top_field_first;
578  pkt.data = (uint8_t *)in_picture;
579  pkt.size = sizeof(AVPicture);
580  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
581  pkt.flags |= AV_PKT_FLAG_KEY;
582 
583  write_frame(s, &pkt, ost);
584  } else {
585  int got_packet;
586  AVFrame big_picture;
587 
588  big_picture = *in_picture;
589  /* better than nothing: use input picture interlaced
590  settings */
591  big_picture.interlaced_frame = in_picture->interlaced_frame;
592  if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) {
593  if (ost->top_field_first == -1)
594  big_picture.top_field_first = in_picture->top_field_first;
595  else
596  big_picture.top_field_first = !!ost->top_field_first;
597  }
598 
599  big_picture.quality = ost->st->codec->global_quality;
600  if (!enc->me_threshold)
601  big_picture.pict_type = 0;
602  if (ost->forced_kf_index < ost->forced_kf_count &&
603  big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
604  big_picture.pict_type = AV_PICTURE_TYPE_I;
605  ost->forced_kf_index++;
606  }
607  ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
608  if (ret < 0) {
609  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
610  exit(1);
611  }
612 
613  if (got_packet) {
614  if (pkt.pts != AV_NOPTS_VALUE)
615  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
616  if (pkt.dts != AV_NOPTS_VALUE)
617  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
618 
619  write_frame(s, &pkt, ost);
620  *frame_size = pkt.size;
621  video_size += pkt.size;
622 
623  /* if two pass, output log */
624  if (ost->logfile && enc->stats_out) {
625  fprintf(ost->logfile, "%s", enc->stats_out);
626  }
627  }
628  }
629  ost->sync_opts++;
630  /*
631  * For video, number of frames in == number of packets out.
632  * But there may be reordering, so we can't throw away frames on encoder
633  * flush; we need to limit them here, before they go into the encoder.
634  */
635  ost->frame_number++;
636 }
637 
638 static double psnr(double d)
639 {
640  return -10.0 * log(d) / log(10.0);
641 }
642 
643 static void do_video_stats(OutputStream *ost, int frame_size)
644 {
645  AVCodecContext *enc;
646  int frame_number;
647  double ti1, bitrate, avg_bitrate;
648 
649  /* this is executed just the first time do_video_stats is called */
650  if (!vstats_file) {
651  vstats_file = fopen(vstats_filename, "w");
652  if (!vstats_file) {
653  perror("fopen");
654  exit(1);
655  }
656  }
657 
658  enc = ost->st->codec;
659  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
660  frame_number = ost->frame_number;
661  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
662  if (enc->flags&CODEC_FLAG_PSNR)
663  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
664 
665  fprintf(vstats_file,"f_size= %6d ", frame_size);
666  /* compute pts value */
667  ti1 = ost->sync_opts * av_q2d(enc->time_base);
668  if (ti1 < 0.01)
669  ti1 = 0.01;
670 
671  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
672  avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
673  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
674  (double)video_size / 1024, ti1, bitrate, avg_bitrate);
675  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
676  }
677 }
678 
679 /*
680  * Read one frame from the lavfi output for ost and encode it.
681  */
682 static int poll_filter(OutputStream *ost)
683 {
684  OutputFile *of = output_files[ost->file_index];
685  AVFilterBufferRef *picref;
686  AVFrame *filtered_frame = NULL;
687  int frame_size, ret;
688 
689  if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
690  return AVERROR(ENOMEM);
691  } else
692  avcodec_get_frame_defaults(ost->filtered_frame);
693  filtered_frame = ost->filtered_frame;
694 
695  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
696  !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
697  ret = av_buffersink_read_samples(ost->filter->filter, &picref,
698  ost->st->codec->frame_size);
699  else
700  ret = av_buffersink_read(ost->filter->filter, &picref);
701 
702  if (ret < 0)
703  return ret;
704 
705  avfilter_copy_buf_props(filtered_frame, picref);
706  if (picref->pts != AV_NOPTS_VALUE) {
707  filtered_frame->pts = av_rescale_q(picref->pts,
708  ost->filter->filter->inputs[0]->time_base,
709  ost->st->codec->time_base) -
710  av_rescale_q(of->start_time,
711  AV_TIME_BASE_Q,
712  ost->st->codec->time_base);
713 
714  if (of->start_time && filtered_frame->pts < 0) {
715  avfilter_unref_buffer(picref);
716  return 0;
717  }
718  }
719 
720  switch (ost->filter->filter->inputs[0]->type) {
721  case AVMEDIA_TYPE_VIDEO:
722  if (!ost->frame_aspect_ratio)
723  ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
724 
725  do_video_out(of->ctx, ost, filtered_frame, &frame_size);
726  if (vstats_filename && frame_size)
727  do_video_stats(ost, frame_size);
728  break;
729  case AVMEDIA_TYPE_AUDIO:
730  do_audio_out(of->ctx, ost, filtered_frame);
731  break;
732  default:
733  // TODO support subtitle filters
734  av_assert0(0);
735  }
736 
737  avfilter_unref_buffer(picref);
738 
739  return 0;
740 }
741 
742 static void finish_output_stream(OutputStream *ost)
743 {
744  OutputFile *of = output_files[ost->file_index];
745  int i;
746 
747  ost->finished = 1;
748 
749  if (of->shortest) {
750  for (i = 0; i < of->ctx->nb_streams; i++)
751  output_streams[of->ost_index + i]->finished = 1;
752  }
753 }
754 
755 /*
756  * Read as many frames as possible from lavfi and encode them.
757  *
758  * Always read from the active stream with the lowest timestamp. If no frames
759  * are available for it then return EAGAIN and wait for more input. This way we
760  * can use lavfi sources that generate an unlimited number of frames without memory
761  * usage exploding.
762  */
763 static int poll_filters(void)
764 {
765  int i, ret = 0;
766 
767  while (ret >= 0 && !received_sigterm) {
768  OutputStream *ost = NULL;
769  int64_t min_pts = INT64_MAX;
770 
771  /* choose output stream with the lowest timestamp */
772  for (i = 0; i < nb_output_streams; i++) {
773  int64_t pts = output_streams[i]->sync_opts;
774 
775  if (!output_streams[i]->filter || output_streams[i]->finished)
776  continue;
777 
778  pts = av_rescale_q(pts, output_streams[i]->st->codec->time_base,
779  AV_TIME_BASE_Q);
780  if (pts < min_pts) {
781  min_pts = pts;
782  ost = output_streams[i];
783  }
784  }
785 
786  if (!ost)
787  break;
788 
789  ret = poll_filter(ost);
790 
791  if (ret == AVERROR_EOF) {
792  finish_output_stream(ost);
793  ret = 0;
794  } else if (ret == AVERROR(EAGAIN))
795  return 0;
796  }
797 
798  return ret;
799 }
800 
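/* Print a progress line (at most every 0.5 seconds, or a final summary when
 * is_last_report is set) with frame count, fps, quantizer, output size and
 * bitrate, plus a QP histogram and PSNR values when enabled. */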
801 static void print_report(int is_last_report, int64_t timer_start)
802 {
803  char buf[1024];
804  OutputStream *ost;
805  AVFormatContext *oc;
806  int64_t total_size;
807  AVCodecContext *enc;
808  int frame_number, vid, i;
809  double bitrate, ti1, pts;
810  static int64_t last_time = -1;
811  static int qp_histogram[52];
812 
813  if (!print_stats && !is_last_report)
814  return;
815 
816  if (!is_last_report) {
817  int64_t cur_time;
818  /* display the report every 0.5 seconds */
819  cur_time = av_gettime();
820  if (last_time == -1) {
821  last_time = cur_time;
822  return;
823  }
824  if ((cur_time - last_time) < 500000)
825  return;
826  last_time = cur_time;
827  }
828 
829 
830  oc = output_files[0]->ctx;
831 
832  total_size = avio_size(oc->pb);
833  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
834  total_size = avio_tell(oc->pb);
835  if (total_size < 0) {
836  char errbuf[128];
837  av_strerror(total_size, errbuf, sizeof(errbuf));
838  av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
839  "avio_tell() failed: %s\n", errbuf);
840  total_size = 0;
841  }
842 
843  buf[0] = '\0';
844  ti1 = 1e10;
845  vid = 0;
846  for (i = 0; i < nb_output_streams; i++) {
847  float q = -1;
848  ost = output_streams[i];
849  enc = ost->st->codec;
850  if (!ost->stream_copy && enc->coded_frame)
851  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
852  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
853  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
854  }
855  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
856  float t = (av_gettime() - timer_start) / 1000000.0;
857 
858  frame_number = ost->frame_number;
859  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
860  frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
861  if (is_last_report)
862  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
863  if (qp_hist) {
864  int j;
865  int qp = lrintf(q);
866  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
867  qp_histogram[qp]++;
868  for (j = 0; j < 32; j++)
869  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
870  }
871  if (enc->flags&CODEC_FLAG_PSNR) {
872  int j;
873  double error, error_sum = 0;
874  double scale, scale_sum = 0;
875  char type[3] = { 'Y','U','V' };
876  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
877  for (j = 0; j < 3; j++) {
878  if (is_last_report) {
879  error = enc->error[j];
880  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
881  } else {
882  error = enc->coded_frame->error[j];
883  scale = enc->width * enc->height * 255.0 * 255.0;
884  }
885  if (j)
886  scale /= 4;
887  error_sum += error;
888  scale_sum += scale;
889  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
890  }
891  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
892  }
893  vid = 1;
894  }
895  /* compute min output value */
896  pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
897  if ((pts < ti1) && (pts > 0))
898  ti1 = pts;
899  }
900  if (ti1 < 0.01)
901  ti1 = 0.01;
902 
903  bitrate = (double)(total_size * 8) / ti1 / 1000.0;
904 
905  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
906  "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
907  (double)total_size / 1024, ti1, bitrate);
908 
909  if (nb_frames_dup || nb_frames_drop)
910  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
911  nb_frames_dup, nb_frames_drop);
912 
913  av_log(NULL, AV_LOG_INFO, "%s \r", buf);
914 
915  fflush(stderr);
916 
917  if (is_last_report) {
918  int64_t raw= audio_size + video_size + extra_size;
919  av_log(NULL, AV_LOG_INFO, "\n");
920  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
921  video_size / 1024.0,
922  audio_size / 1024.0,
923  extra_size / 1024.0,
924  100.0 * (total_size - raw) / raw
925  );
926  }
927 }
928 
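/* Send NULL frames to every encoder that still needs flushing so that any
 * delayed packets are retrieved and muxed before the output is finalized. */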
929 static void flush_encoders(void)
930 {
931  int i, ret;
932 
933  for (i = 0; i < nb_output_streams; i++) {
934  OutputStream *ost = output_streams[i];
935  AVCodecContext *enc = ost->st->codec;
936  AVFormatContext *os = output_files[ost->file_index]->ctx;
937  int stop_encoding = 0;
938 
939  if (!ost->encoding_needed)
940  continue;
941 
942  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
943  continue;
944  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
945  continue;
946 
947  for (;;) {
948  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
949  const char *desc;
950  int64_t *size;
951 
952  switch (ost->st->codec->codec_type) {
953  case AVMEDIA_TYPE_AUDIO:
954  encode = avcodec_encode_audio2;
955  desc = "Audio";
956  size = &audio_size;
957  break;
958  case AVMEDIA_TYPE_VIDEO:
959  encode = avcodec_encode_video2;
960  desc = "Video";
961  size = &video_size;
962  break;
963  default:
964  stop_encoding = 1;
965  }
966 
967  if (encode) {
968  AVPacket pkt;
969  int got_packet;
970  av_init_packet(&pkt);
971  pkt.data = NULL;
972  pkt.size = 0;
973 
974  ret = encode(enc, &pkt, NULL, &got_packet);
975  if (ret < 0) {
976  av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
977  exit(1);
978  }
979  *size += ret;
980  if (ost->logfile && enc->stats_out) {
981  fprintf(ost->logfile, "%s", enc->stats_out);
982  }
983  if (!got_packet) {
984  stop_encoding = 1;
985  break;
986  }
987  if (pkt.pts != AV_NOPTS_VALUE)
988  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
989  if (pkt.dts != AV_NOPTS_VALUE)
990  pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
991  if (pkt.duration > 0)
992  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
993  write_frame(os, &pkt, ost);
994  }
995 
996  if (stop_encoding)
997  break;
998  }
999  }
1000 }
1001 
1002 /*
1003  * Check whether a packet from ist should be written into ost at this time
1004  */
1005 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1006 {
1007  OutputFile *of = output_files[ost->file_index];
1008  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1009 
1010  if (ost->source_index != ist_index)
1011  return 0;
1012 
1013  if (of->start_time && ist->last_dts < of->start_time)
1014  return 0;
1015 
1016  return 1;
1017 }
1018 
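/* Remux a packet without re-encoding: rescale its timestamps into the output
 * stream time base, subtract the output start time and apply the parser-based
 * workarounds for a few codecs before writing it out. */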
1019 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1020 {
1021  OutputFile *of = output_files[ost->file_index];
1022  int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1023  AVPacket opkt;
1024 
1025  av_init_packet(&opkt);
1026 
1027  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1028  !ost->copy_initial_nonkeyframes)
1029  return;
1030 
1031  if (of->recording_time != INT64_MAX &&
1032  ist->last_dts >= of->recording_time + of->start_time) {
1033  ost->finished = 1;
1034  return;
1035  }
1036 
1037  /* force the input stream PTS */
1038  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1039  audio_size += pkt->size;
1040  else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1041  video_size += pkt->size;
1042  ost->sync_opts++;
1043  }
1044 
1045  if (pkt->pts != AV_NOPTS_VALUE)
1046  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1047  else
1048  opkt.pts = AV_NOPTS_VALUE;
1049 
1050  if (pkt->dts == AV_NOPTS_VALUE)
1051  opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1052  else
1053  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1054  opkt.dts -= ost_tb_start_time;
1055 
1056  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1057  opkt.flags = pkt->flags;
1058 
1059  // FIXME: remove the following 2 lines; they should be replaced by the bitstream filters
1060  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1061  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1062  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1063  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1064  ) {
1065  if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1066  opkt.destruct = av_destruct_packet;
1067  } else {
1068  opkt.data = pkt->data;
1069  opkt.size = pkt->size;
1070  }
1071 
1072  write_frame(of->ctx, &opkt, ost);
1073  ost->st->codec->frame_number++;
1074 }
1075 
1076 static void rate_emu_sleep(InputStream *ist)
1077 {
1078  if (input_files[ist->file_index]->rate_emu) {
1079  int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
1080  int64_t now = av_gettime() - ist->start;
1081  if (pts > now)
1082  av_usleep(pts - now);
1083  }
1084 }
1085 
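/* If the decoder did not report a channel layout, fall back to the default
 * layout for its channel count; return 0 if no layout could be guessed. */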
1086 static int guess_input_channel_layout(InputStream *ist)
1087 {
1088  AVCodecContext *dec = ist->st->codec;
1089 
1090  if (!dec->channel_layout) {
1091  char layout_name[256];
1092 
1093  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1094  if (!dec->channel_layout)
1095  return 0;
1096  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1097  dec->channels, dec->channel_layout);
1098  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1099  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1100  }
1101  return 1;
1102 }
1103 
1104 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1105 {
1106  AVFrame *decoded_frame;
1107  AVCodecContext *avctx = ist->st->codec;
1108  int i, ret, resample_changed;
1109 
1110  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1111  return AVERROR(ENOMEM);
1112  decoded_frame = ist->decoded_frame;
1113 
1114  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1115  if (!*got_output || ret < 0) {
1116  if (!pkt->size) {
1117  for (i = 0; i < ist->nb_filters; i++)
1118  av_buffersrc_buffer(ist->filters[i]->filter, NULL);
1119  }
1120  return ret;
1121  }
1122 
1123  /* if the decoder provides a pts, use it instead of the last packet pts.
1124  the decoder could be delaying output by a packet or more. */
1125  if (decoded_frame->pts != AV_NOPTS_VALUE)
1126  ist->next_dts = decoded_frame->pts;
1127  else if (pkt->pts != AV_NOPTS_VALUE) {
1128  decoded_frame->pts = pkt->pts;
1129  pkt->pts = AV_NOPTS_VALUE;
1130  }
1131 
1132  rate_emu_sleep(ist);
1133 
1134  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1135  ist->resample_channels != avctx->channels ||
1136  ist->resample_channel_layout != decoded_frame->channel_layout ||
1137  ist->resample_sample_rate != decoded_frame->sample_rate;
1138  if (resample_changed) {
1139  char layout1[64], layout2[64];
1140 
1141  if (!guess_input_channel_layout(ist)) {
1142  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1143  "layout for Input Stream #%d.%d\n", ist->file_index,
1144  ist->st->index);
1145  exit(1);
1146  }
1147  decoded_frame->channel_layout = avctx->channel_layout;
1148 
1149  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1150  ist->resample_channel_layout);
1151  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1152  decoded_frame->channel_layout);
1153 
1155  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1156  ist->file_index, ist->st->index,
1158  ist->resample_channels, layout1,
1159  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1160  avctx->channels, layout2);
1161 
1162  ist->resample_sample_fmt = decoded_frame->format;
1163  ist->resample_sample_rate = decoded_frame->sample_rate;
1164  ist->resample_channel_layout = decoded_frame->channel_layout;
1165  ist->resample_channels = avctx->channels;
1166 
1167  for (i = 0; i < nb_filtergraphs; i++)
1168  if (ist_in_filtergraph(filtergraphs[i], ist) &&
1169  configure_filtergraph(filtergraphs[i]) < 0) {
1170  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1171  exit(1);
1172  }
1173  }
1174 
1175  if (decoded_frame->pts != AV_NOPTS_VALUE)
1176  decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1177  ist->st->time_base,
1178  (AVRational){1, ist->st->codec->sample_rate});
1179  for (i = 0; i < ist->nb_filters; i++)
1180  av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
1181 
1182  return ret;
1183 }
1184 
1185 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1186 {
1187  AVFrame *decoded_frame;
1188  void *buffer_to_free = NULL;
1189  int i, ret = 0, resample_changed;
1190 
1191  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1192  return AVERROR(ENOMEM);
1193  decoded_frame = ist->decoded_frame;
1194 
1195  ret = avcodec_decode_video2(ist->st->codec,
1196  decoded_frame, got_output, pkt);
1197  if (!*got_output || ret < 0) {
1198  if (!pkt->size) {
1199  for (i = 0; i < ist->nb_filters; i++)
1200  av_buffersrc_buffer(ist->filters[i]->filter, NULL);
1201  }
1202  return ret;
1203  }
1204 
1205  decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1206  decoded_frame->pkt_dts);
1207  pkt->size = 0;
1208  pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
1209 
1210  rate_emu_sleep(ist);
1211 
1212  if (ist->st->sample_aspect_ratio.num)
1213  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1214 
1215  resample_changed = ist->resample_width != decoded_frame->width ||
1216  ist->resample_height != decoded_frame->height ||
1217  ist->resample_pix_fmt != decoded_frame->format;
1218  if (resample_changed) {
1220  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1221  ist->file_index, ist->st->index,
1223  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1224 
1225  ret = poll_filters();
1226  if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)))
1227  av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
1228 
1229  ist->resample_width = decoded_frame->width;
1230  ist->resample_height = decoded_frame->height;
1231  ist->resample_pix_fmt = decoded_frame->format;
1232 
1233  for (i = 0; i < nb_filtergraphs; i++)
1234  if (ist_in_filtergraph(filtergraphs[i], ist) &&
1235  configure_filtergraph(filtergraphs[i]) < 0) {
1236  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1237  exit(1);
1238  }
1239  }
1240 
1241  for (i = 0; i < ist->nb_filters; i++) {
1242  if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1 && !do_deinterlace) {
1243  FrameBuffer *buf = decoded_frame->opaque;
1244  AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1245  decoded_frame->data, decoded_frame->linesize,
1246  AV_PERM_READ | AV_PERM_PRESERVE,
1247  ist->st->codec->width, ist->st->codec->height,
1248  ist->st->codec->pix_fmt);
1249 
1250  avfilter_copy_frame_props(fb, decoded_frame);
1251  fb->buf->priv = buf;
1252  fb->buf->free = filter_release_buffer;
1253 
1254  buf->refcount++;
1255  av_buffersrc_buffer(ist->filters[i]->filter, fb);
1256  } else
1257  av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
1258  }
1259 
1260  av_free(buffer_to_free);
1261  return ret;
1262 }
1263 
1264 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1265 {
1266  AVSubtitle subtitle;
1267  int i, ret = avcodec_decode_subtitle2(ist->st->codec,
1268  &subtitle, got_output, pkt);
1269  if (ret < 0)
1270  return ret;
1271  if (!*got_output)
1272  return ret;
1273 
1274  rate_emu_sleep(ist);
1275 
1276  for (i = 0; i < nb_output_streams; i++) {
1277  OutputStream *ost = output_streams[i];
1278 
1279  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1280  continue;
1281 
1282  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
1283  }
1284 
1285  avsubtitle_free(&subtitle);
1286  return ret;
1287 }
1288 
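/* Decode (or, for stream copy, just timestamp-track) one input packet and
 * feed the results to the filtergraphs and to the stream-copy outputs. */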
1289 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1290 static int output_packet(InputStream *ist, const AVPacket *pkt)
1291 {
1292  int i;
1293  int got_output;
1294  AVPacket avpkt;
1295 
1296  if (ist->next_dts == AV_NOPTS_VALUE)
1297  ist->next_dts = ist->last_dts;
1298 
1299  if (pkt == NULL) {
1300  /* EOF handling */
1301  av_init_packet(&avpkt);
1302  avpkt.data = NULL;
1303  avpkt.size = 0;
1304  goto handle_eof;
1305  } else {
1306  avpkt = *pkt;
1307  }
1308 
1309  if (pkt->dts != AV_NOPTS_VALUE)
1310  ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1311 
1312  // while we have more to decode or while the decoder did output something on EOF
1313  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1314  int ret = 0;
1315  handle_eof:
1316 
1317  ist->last_dts = ist->next_dts;
1318 
1319  if (avpkt.size && avpkt.size != pkt->size) {
1321  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1322  ist->showed_multi_packet_warning = 1;
1323  }
1324 
1325  switch (ist->st->codec->codec_type) {
1326  case AVMEDIA_TYPE_AUDIO:
1327  ret = decode_audio (ist, &avpkt, &got_output);
1328  break;
1329  case AVMEDIA_TYPE_VIDEO:
1330  ret = decode_video (ist, &avpkt, &got_output);
1331  if (avpkt.duration)
1332  ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1333  else if (ist->st->avg_frame_rate.num)
1334  ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1335  AV_TIME_BASE_Q);
1336  else if (ist->st->codec->time_base.num != 0) {
1337  int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1338  ist->st->codec->ticks_per_frame;
1339  ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
1340  }
1341  break;
1342  case AVMEDIA_TYPE_SUBTITLE:
1343  ret = transcode_subtitles(ist, &avpkt, &got_output);
1344  break;
1345  default:
1346  return -1;
1347  }
1348 
1349  if (ret < 0)
1350  return ret;
1351  // touch data and size only if not EOF
1352  if (pkt) {
1353  avpkt.data += ret;
1354  avpkt.size -= ret;
1355  }
1356  if (!got_output) {
1357  continue;
1358  }
1359  }
1360 
1361  /* handle stream copy */
1362  if (!ist->decoding_needed) {
1363  rate_emu_sleep(ist);
1364  ist->last_dts = ist->next_dts;
1365  switch (ist->st->codec->codec_type) {
1366  case AVMEDIA_TYPE_AUDIO:
1367  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1368  ist->st->codec->sample_rate;
1369  break;
1370  case AVMEDIA_TYPE_VIDEO:
1371  if (ist->st->codec->time_base.num != 0) {
1372  int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
1373  ist->next_dts += ((int64_t)AV_TIME_BASE *
1374  ist->st->codec->time_base.num * ticks) /
1375  ist->st->codec->time_base.den;
1376  }
1377  break;
1378  }
1379  }
1380  for (i = 0; pkt && i < nb_output_streams; i++) {
1381  OutputStream *ost = output_streams[i];
1382 
1383  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1384  continue;
1385 
1386  do_streamcopy(ist, ost, pkt);
1387  }
1388 
1389  return 0;
1390 }
1391 
1392 static void print_sdp(void)
1393 {
1394  char sdp[2048];
1395  int i;
1396  AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1397 
1398  if (!avc)
1399  exit(1);
1400  for (i = 0; i < nb_output_files; i++)
1401  avc[i] = output_files[i]->ctx;
1402 
1403  av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1404  printf("SDP:\n%s\n", sdp);
1405  fflush(stdout);
1406  av_freep(&avc);
1407 }
1408 
1409 static int init_input_stream(int ist_index, char *error, int error_len)
1410 {
1411  int i, ret;
1412  InputStream *ist = input_streams[ist_index];
1413  if (ist->decoding_needed) {
1414  AVCodec *codec = ist->dec;
1415  if (!codec) {
1416  snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1417  ist->st->codec->codec_id, ist->file_index, ist->st->index);
1418  return AVERROR(EINVAL);
1419  }
1420 
1421  /* update requested sample format for the decoder based on the
1422  corresponding encoder sample format */
1423  for (i = 0; i < nb_output_streams; i++) {
1424  OutputStream *ost = output_streams[i];
1425  if (ost->source_index == ist_index) {
1426  update_sample_fmt(ist->st->codec, codec, ost->st->codec);
1427  break;
1428  }
1429  }
1430 
1431  if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
1432  ist->st->codec->get_buffer = codec_get_buffer;
1433  ist->st->codec->release_buffer = codec_release_buffer;
1434  ist->st->codec->opaque = &ist->buffer_pool;
1435  }
1436 
1437  if (!av_dict_get(ist->opts, "threads", NULL, 0))
1438  av_dict_set(&ist->opts, "threads", "auto", 0);
1439  if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {
1440  if (ret == AVERROR_EXPERIMENTAL)
1441  abort_codec_experimental(codec, 0);
1442  snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
1443  ist->file_index, ist->st->index);
1444  return ret;
1445  }
1446  assert_avoptions(ist->opts);
1447  }
1448 
1449  ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1450  ist->next_dts = AV_NOPTS_VALUE;
1451  init_pts_correction(&ist->pts_ctx);
1452  ist->is_start = 1;
1453 
1454  return 0;
1455 }
1456 
1457 static InputStream *get_input_stream(OutputStream *ost)
1458 {
1459  if (ost->source_index >= 0)
1460  return input_streams[ost->source_index];
1461 
1462  if (ost->filter) {
1463  FilterGraph *fg = ost->filter->graph;
1464  int i;
1465 
1466  for (i = 0; i < fg->nb_inputs; i++)
1467  if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
1468  return fg->inputs[i]->ist;
1469  }
1470 
1471  return NULL;
1472 }
1473 
1474 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1475  AVCodecContext *avctx)
1476 {
1477  char *p;
1478  int n = 1, i;
1479  int64_t t;
1480 
1481  for (p = kf; *p; p++)
1482  if (*p == ',')
1483  n++;
1484  ost->forced_kf_count = n;
1485  ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1486  if (!ost->forced_kf_pts) {
1487  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1488  exit(1);
1489  }
1490 
1491  p = kf;
1492  for (i = 0; i < n; i++) {
1493  char *next = strchr(p, ',');
1494 
1495  if (next)
1496  *next++ = 0;
1497 
1498  t = parse_time_or_die("force_key_frames", p, 1);
1499  ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1500 
1501  p = next;
1502  }
1503 }
1504 
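/* Set up every output stream: copy codec parameters for stream copy, or
 * configure filtergraphs and open encoders for transcoding; then open the
 * decoders, write the output file headers and print the stream mapping. */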
1505 static int transcode_init(void)
1506 {
1507  int ret = 0, i, j, k;
1508  AVFormatContext *oc;
1509  AVCodecContext *codec;
1510  OutputStream *ost;
1511  InputStream *ist;
1512  char error[1024];
1513  int want_sdp = 1;
1514 
1515  /* init framerate emulation */
1516  for (i = 0; i < nb_input_files; i++) {
1517  InputFile *ifile = input_files[i];
1518  if (ifile->rate_emu)
1519  for (j = 0; j < ifile->nb_streams; j++)
1520  input_streams[j + ifile->ist_index]->start = av_gettime();
1521  }
1522 
1523  /* output stream init */
1524  for (i = 0; i < nb_output_files; i++) {
1525  oc = output_files[i]->ctx;
1526  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1527  av_dump_format(oc, i, oc->filename, 1);
1528  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1529  return AVERROR(EINVAL);
1530  }
1531  }
1532 
1533  /* init complex filtergraphs */
1534  for (i = 0; i < nb_filtergraphs; i++)
1535  if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1536  return ret;
1537 
1538  /* for each output stream, we compute the right encoding parameters */
1539  for (i = 0; i < nb_output_streams; i++) {
1540  AVCodecContext *icodec = NULL;
1541  ost = output_streams[i];
1542  oc = output_files[ost->file_index]->ctx;
1543  ist = get_input_stream(ost);
1544 
1545  if (ost->attachment_filename)
1546  continue;
1547 
1548  codec = ost->st->codec;
1549 
1550  if (ist) {
1551  icodec = ist->st->codec;
1552 
1553  ost->st->disposition = ist->st->disposition;
1554  codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
1555  codec->chroma_sample_location = icodec->chroma_sample_location;
1556  }
1557 
1558  if (ost->stream_copy) {
1559  uint64_t extra_size;
1560 
1561  av_assert0(ist && !ost->filter);
1562 
1563  extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1564 
1565  if (extra_size > INT_MAX) {
1566  return AVERROR(EINVAL);
1567  }
1568 
1569  /* if stream_copy is selected, no need to decode or encode */
1570  codec->codec_id = icodec->codec_id;
1571  codec->codec_type = icodec->codec_type;
1572 
1573  if (!codec->codec_tag) {
1574  if (!oc->oformat->codec_tag ||
1575  av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
1576  av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
1577  codec->codec_tag = icodec->codec_tag;
1578  }
1579 
1580  codec->bit_rate = icodec->bit_rate;
1581  codec->rc_max_rate = icodec->rc_max_rate;
1582  codec->rc_buffer_size = icodec->rc_buffer_size;
1583  codec->field_order = icodec->field_order;
1584  codec->extradata = av_mallocz(extra_size);
1585  if (!codec->extradata) {
1586  return AVERROR(ENOMEM);
1587  }
1588  memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
1589  codec->extradata_size = icodec->extradata_size;
1590  if (!copy_tb) {
1591  codec->time_base = icodec->time_base;
1592  codec->time_base.num *= icodec->ticks_per_frame;
1593  av_reduce(&codec->time_base.num, &codec->time_base.den,
1594  codec->time_base.num, codec->time_base.den, INT_MAX);
1595  } else
1596  codec->time_base = ist->st->time_base;
1597 
1598  switch (codec->codec_type) {
1599  case AVMEDIA_TYPE_AUDIO:
1600  if (audio_volume != 256) {
1601  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1602  exit(1);
1603  }
1604  codec->channel_layout = icodec->channel_layout;
1605  codec->sample_rate = icodec->sample_rate;
1606  codec->channels = icodec->channels;
1607  codec->frame_size = icodec->frame_size;
1608  codec->audio_service_type = icodec->audio_service_type;
1609  codec->block_align = icodec->block_align;
1610  break;
1611  case AVMEDIA_TYPE_VIDEO:
1612  codec->pix_fmt = icodec->pix_fmt;
1613  codec->width = icodec->width;
1614  codec->height = icodec->height;
1615  codec->has_b_frames = icodec->has_b_frames;
1616  if (!codec->sample_aspect_ratio.num) {
1617  codec->sample_aspect_ratio =
1618  ost->st->sample_aspect_ratio =
1619  ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
1620  ist->st->codec->sample_aspect_ratio.num ?
1621  ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
1622  }
1623  break;
1624  case AVMEDIA_TYPE_SUBTITLE:
1625  codec->width = icodec->width;
1626  codec->height = icodec->height;
1627  break;
1628  case AVMEDIA_TYPE_DATA:
1629  case AVMEDIA_TYPE_ATTACHMENT:
1630  break;
1631  default:
1632  abort();
1633  }
1634  } else {
1635  if (!ost->enc) {
1636  /* should only happen when a default codec is not present. */
1637  snprintf(error, sizeof(error), "Automatic encoder selection "
1638  "failed for output stream #%d:%d. Default encoder for "
1639  "format %s is probably disabled. Please choose an "
1640  "encoder manually.\n", ost->file_index, ost->index,
1641  oc->oformat->name);
1642  ret = AVERROR(EINVAL);
1643  goto dump_format;
1644  }
1645 
1646  if (ist)
1647  ist->decoding_needed = 1;
1648  ost->encoding_needed = 1;
1649 
1650  /*
1651  * We want CFR output if and only if one of those is true:
1652  * 1) user specified output framerate with -r
1653  * 2) user specified -vsync cfr
1654  * 3) output format is CFR and the user didn't force vsync to
1655  * something else than CFR
1656  *
1657  * in such a case, set ost->frame_rate
1658  */
1659  if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1660  !ost->frame_rate.num && ist &&
1661  (video_sync_method == VSYNC_CFR ||
1662  (video_sync_method == VSYNC_AUTO &&
1663  !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1664  ost->frame_rate = ist->framerate.num ? ist->framerate :
1665  ist->st->avg_frame_rate.num ?
1666  ist->st->avg_frame_rate :
1667  (AVRational){25, 1};
1668 
1669  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1670  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1671  ost->frame_rate = ost->enc->supported_framerates[idx];
1672  }
1673  }
1674 
1675  if (!ost->filter &&
1676  (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
1677  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
1678  FilterGraph *fg;
1679  fg = init_simple_filtergraph(ist, ost);
1680  if (configure_filtergraph(fg)) {
1681  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1682  exit(1);
1683  }
1684  }
1685 
1686  switch (codec->codec_type) {
1687  case AVMEDIA_TYPE_AUDIO:
1688  codec->sample_fmt = ost->filter->filter->inputs[0]->format;
1689  codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
1690  codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1691  codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);
1692  codec->time_base = (AVRational){ 1, codec->sample_rate };
1693  break;
1694  case AVMEDIA_TYPE_VIDEO:
1695  codec->time_base = ost->filter->filter->inputs[0]->time_base;
1696 
1697  codec->width = ost->filter->filter->inputs[0]->w;
1698  codec->height = ost->filter->filter->inputs[0]->h;
1699  codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1700  ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1701  av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
1702  ost->filter->filter->inputs[0]->sample_aspect_ratio;
1703  codec->pix_fmt = ost->filter->filter->inputs[0]->format;
1704 
1705  if (icodec &&
1706  (codec->width != icodec->width ||
1707  codec->height != icodec->height ||
1708  codec->pix_fmt != icodec->pix_fmt)) {
1709  codec->bits_per_raw_sample = 0;
1710  }
1711 
1712  if (ost->forced_keyframes)
1713  parse_forced_key_frames(ost->forced_keyframes, ost,
1714  ost->st->codec);
1715  break;
1716  case AVMEDIA_TYPE_SUBTITLE:
1717  codec->time_base = (AVRational){1, 1000};
1718  break;
1719  default:
1720  abort();
1721  break;
1722  }
1723  /* two pass mode */
1724  if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1725  char logfilename[1024];
1726  FILE *f;
1727 
1728  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1729  ost->logfile_prefix ? ost->logfile_prefix :
1730  DEFAULT_PASS_LOGFILENAME_PREFIX,
1731  i);
1732  if (!strcmp(ost->enc->name, "libx264")) {
1733  av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1734  } else {
1735  if (codec->flags & CODEC_FLAG_PASS1) {
1736  f = fopen(logfilename, "wb");
1737  if (!f) {
1738  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1739  logfilename, strerror(errno));
1740  exit(1);
1741  }
1742  ost->logfile = f;
1743  } else {
1744  char *logbuffer;
1745  size_t logbuffer_size;
1746  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1747  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1748  logfilename);
1749  exit(1);
1750  }
1751  codec->stats_in = logbuffer;
1752  }
1753  }
1754  }
1755  }
1756  }
1757 
1758  /* open each encoder */
1759  for (i = 0; i < nb_output_streams; i++) {
1760  ost = output_streams[i];
1761  if (ost->encoding_needed) {
1762  AVCodec *codec = ost->enc;
1763  AVCodecContext *dec = NULL;
1764 
1765  if ((ist = get_input_stream(ost)))
1766  dec = ist->st->codec;
1767  if (dec && dec->subtitle_header) {
1768  ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
1769  if (!ost->st->codec->subtitle_header) {
1770  ret = AVERROR(ENOMEM);
1771  goto dump_format;
1772  }
1773  memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1774  ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
1775  }
1776  if (!av_dict_get(ost->opts, "threads", NULL, 0))
1777  av_dict_set(&ost->opts, "threads", "auto", 0);
1778  if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {
1779  if (ret == AVERROR_EXPERIMENTAL)
1780  abort_codec_experimental(codec, 1);
1781  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1782  ost->file_index, ost->index);
1783  goto dump_format;
1784  }
1785  assert_avoptions(ost->opts);
1786  if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
1787  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
1788  "It takes bits/s as argument, not kbits/s\n");
1789  extra_size += ost->st->codec->extradata_size;
1790 
1791  if (ost->st->codec->me_threshold)
1792  input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
1793  }
1794  }
1795 
1796  /* init input streams */
1797  for (i = 0; i < nb_input_streams; i++)
1798  if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
1799  goto dump_format;
1800 
1801  /* discard unused programs */
1802  for (i = 0; i < nb_input_files; i++) {
1803  InputFile *ifile = input_files[i];
1804  for (j = 0; j < ifile->ctx->nb_programs; j++) {
1805  AVProgram *p = ifile->ctx->programs[j];
1806  int discard = AVDISCARD_ALL;
1807 
1808  for (k = 0; k < p->nb_stream_indexes; k++)
1809  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
1810  discard = AVDISCARD_DEFAULT;
1811  break;
1812  }
1813  p->discard = discard;
1814  }
1815  }
1816 
1817  /* open files and write file headers */
1818  for (i = 0; i < nb_output_files; i++) {
1819  oc = output_files[i]->ctx;
1820  oc->interrupt_callback = int_cb;
1821  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
1822  char errbuf[128];
1823  const char *errbuf_ptr = errbuf;
1824  if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
1825  errbuf_ptr = strerror(AVUNERROR(ret));
1826  snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);
1827  ret = AVERROR(EINVAL);
1828  goto dump_format;
1829  }
1830  assert_avoptions(output_files[i]->opts);
1831  if (strcmp(oc->oformat->name, "rtp")) {
1832  want_sdp = 0;
1833  }
1834  }
1835 
1836  dump_format:
1837  /* dump the file output parameters - cannot be done before in case
1838  of stream copy */
1839  for (i = 0; i < nb_output_files; i++) {
1840  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
1841  }
1842 
1843  /* dump the stream mapping */
1844  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
1845  for (i = 0; i < nb_input_streams; i++) {
1846  ist = input_streams[i];
1847 
1848  for (j = 0; j < ist->nb_filters; j++) {
1849  if (ist->filters[j]->graph->graph_desc) {
1850  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
1851  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
1852  ist->filters[j]->name);
1853  if (nb_filtergraphs > 1)
1854  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
1855  av_log(NULL, AV_LOG_INFO, "\n");
1856  }
1857  }
1858  }
1859 
1860  for (i = 0; i < nb_output_streams; i++) {
1861  ost = output_streams[i];
1862 
1863  if (ost->attachment_filename) {
1864  /* an attached file */
1865  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
1866  ost->attachment_filename, ost->file_index, ost->index);
1867  continue;
1868  }
1869 
1870  if (ost->filter && ost->filter->graph->graph_desc) {
1871  /* output from a complex graph */
1872  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
1873  if (nb_filtergraphs > 1)
1874  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
1875 
1876  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
1877  ost->index, ost->enc ? ost->enc->name : "?");
1878  continue;
1879  }
1880 
1881  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
1882  input_streams[ost->source_index]->file_index,
1883  input_streams[ost->source_index]->st->index,
1884  ost->file_index,
1885  ost->index);
1886  if (ost->sync_ist != input_streams[ost->source_index])
1887  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
1888  ost->sync_ist->file_index,
1889  ost->sync_ist->st->index);
1890  if (ost->stream_copy)
1891  av_log(NULL, AV_LOG_INFO, " (copy)");
1892  else
1893  av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
1894  input_streams[ost->source_index]->dec->name : "?",
1895  ost->enc ? ost->enc->name : "?");
1896  av_log(NULL, AV_LOG_INFO, "\n");
1897  }
1898 
1899  if (ret) {
1900  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
1901  return ret;
1902  }
1903 
1904  if (want_sdp) {
1905  print_sdp();
1906  }
1907 
1908  return 0;
1909 }
1910 
1911 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
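/* A stream that has reached its -frames limit marks all streams of the same
 * output file as finished, so the whole file stops counting as needing
 * output. */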
1912 static int need_output(void)
1913 {
1914  int i;
1915 
1916  for (i = 0; i < nb_output_streams; i++) {
1917  OutputStream *ost = output_streams[i];
1918  OutputFile *of = output_files[ost->file_index];
1919  AVFormatContext *os = output_files[ost->file_index]->ctx;
1920 
1921  if (ost->finished ||
1922  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
1923  continue;
1924  if (ost->frame_number >= ost->max_frames) {
1925  int j;
1926  for (j = 0; j < of->ctx->nb_streams; j++)
1927  output_streams[of->ost_index + j]->finished = 1;
1928  continue;
1929  }
1930 
1931  return 1;
1932  }
1933 
1934  return 0;
1935 }
1936 
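/* Pick the next input file to read from: among all inputs that are neither
 * at EOF nor currently returning EAGAIN, choose the one whose streams have
 * the smallest last decoded DTS, so packets are consumed in roughly
 * interleaved timestamp order. Returns NULL if no input qualifies. */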
1937 static InputFile *select_input_file(void)
1938 {
1939  InputFile *ifile = NULL;
1940  int64_t ipts_min = INT64_MAX;
1941  int i;
1942 
1943  for (i = 0; i < nb_input_streams; i++) {
1944  InputStream *ist = input_streams[i];
1945  int64_t ipts = ist->last_dts;
1946 
1947  if (ist->discard || input_files[ist->file_index]->eagain)
1948  continue;
1949  if (!input_files[ist->file_index]->eof_reached) {
1950  if (ipts < ipts_min) {
1951  ipts_min = ipts;
1952  ifile = input_files[ist->file_index];
1953  }
1954  }
1955  }
1956 
1957  return ifile;
1958 }
1959 
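/*
 * With pthreads available and more than one input file, each input gets its
 * own reader thread (input_thread below): the thread blocks in
 * av_read_frame() and pushes duplicated packets into a per-file
 * AVFifoBuffer, waiting on fifo_cond whenever the FIFO is full. The main
 * thread then drains that FIFO through get_input_packet_mt(), so one slow
 * or blocking input cannot stall reads from the others.
 */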
1960 #if HAVE_PTHREADS
1961 static void *input_thread(void *arg)
1962 {
1963  InputFile *f = arg;
1964  int ret = 0;
1965 
1966  while (!transcoding_finished && ret >= 0) {
1967  AVPacket pkt;
1968  ret = av_read_frame(f->ctx, &pkt);
1969 
1970  if (ret == AVERROR(EAGAIN)) {
1971  av_usleep(10000);
1972  ret = 0;
1973  continue;
1974  } else if (ret < 0)
1975  break;
1976 
1977  pthread_mutex_lock(&f->fifo_lock);
1978  while (!av_fifo_space(f->fifo))
1979  pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
1980 
1981  av_dup_packet(&pkt);
1982  av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
1983 
1984  pthread_mutex_unlock(&f->fifo_lock);
1985  }
1986 
1987  f->finished = 1;
1988  return NULL;
1989 }
1990 
1991 static void free_input_threads(void)
1992 {
1993  int i;
1994 
1995  if (nb_input_files == 1)
1996  return;
1997 
1998  transcoding_finished = 1;
1999 
2000  for (i = 0; i < nb_input_files; i++) {
2001  InputFile *f = input_files[i];
2002  AVPacket pkt;
2003 
2004  if (!f->fifo || f->joined)
2005  continue;
2006 
2007  pthread_mutex_lock(&f->fifo_lock);
2008  while (av_fifo_size(f->fifo)) {
2009  av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2010  av_free_packet(&pkt);
2011  }
2012  pthread_cond_signal(&f->fifo_cond);
2013  pthread_mutex_unlock(&f->fifo_lock);
2014 
2015  pthread_join(f->thread, NULL);
2016  f->joined = 1;
2017 
2018  while (av_fifo_size(f->fifo)) {
2019  av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2020  av_free_packet(&pkt);
2021  }
2022  av_fifo_free(f->fifo);
2023  }
2024 }
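/* The FIFO is drained twice above: once before signalling fifo_cond, so a
 * reader thread blocked on a full FIFO can wake up and exit, and once more
 * after pthread_join() to free any packets written in the meantime. */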
2025 
2026 static int init_input_threads(void)
2027 {
2028  int i, ret;
2029 
2030  if (nb_input_files == 1)
2031  return 0;
2032 
2033  for (i = 0; i < nb_input_files; i++) {
2034  InputFile *f = input_files[i];
2035 
2036  if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2037  return AVERROR(ENOMEM);
2038 
2039  pthread_mutex_init(&f->fifo_lock, NULL);
2040  pthread_cond_init (&f->fifo_cond, NULL);
2041 
2042  if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2043  return AVERROR(ret);
2044  }
2045  return 0;
2046 }
2047 
2048 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2049 {
2050  int ret = 0;
2051 
2052  pthread_mutex_lock(&f->fifo_lock);
2053 
2054  if (av_fifo_size(f->fifo)) {
2055  av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2056  pthread_cond_signal(&f->fifo_cond);
2057  } else {
2058  if (f->finished)
2059  ret = AVERROR_EOF;
2060  else
2061  ret = AVERROR(EAGAIN);
2062  }
2063 
2064  pthread_mutex_unlock(&f->fifo_lock);
2065 
2066  return ret;
2067 }
2068 #endif
2069 
2070 static int get_input_packet(InputFile *f, AVPacket *pkt)
2071 {
2072 #if HAVE_PTHREADS
2073  if (nb_input_files > 1)
2074  return get_input_packet_mt(f, pkt);
2075 #endif
2076  return av_read_frame(f->ctx, pkt);
2077 }
2078 
2079 static int got_eagain(void)
2080 {
2081  int i;
2082  for (i = 0; i < nb_input_files; i++)
2083  if (input_files[i]->eagain)
2084  return 1;
2085  return 0;
2086 }
2087 
2088 static void reset_eagain(void)
2089 {
2090  int i;
2091  for (i = 0; i < nb_input_files; i++)
2092  input_files[i]->eagain = 0;
2093 }
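/* These eagain flags let process_input() distinguish "every input is only
 * temporarily out of packets" (sleep briefly and retry) from a real end of
 * all inputs (report AVERROR_EOF to the transcode loop). */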
2094 
2095 /*
2096  * Read one packet from an input file and send it for
2097  * - decoding -> lavfi (audio/video)
2098  * - decoding -> encoding -> muxing (subtitles)
2099  * - muxing (streamcopy)
2100  *
2101  * Return
2102  * - 0 -- one packet was read and processed
2103  * - AVERROR(EAGAIN) -- no packets were available for selected file,
2104  * this function should be called again
2105  * - AVERROR_EOF -- this function should not be called again
2106  */
2107 static int process_input(void)
2108 {
2109  InputFile *ifile;
2110  AVFormatContext *is;
2111  InputStream *ist;
2112  AVPacket pkt;
2113  int ret, i, j;
2114 
2115  /* select the stream that we must read now */
2116  ifile = select_input_file();
2117  /* if none, input is finished */
2118  if (!ifile) {
2119  if (got_eagain()) {
2120  reset_eagain();
2121  av_usleep(10000);
2122  return AVERROR(EAGAIN);
2123  }
2124  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2125  return AVERROR_EOF;
2126  }
2127 
2128  is = ifile->ctx;
2129  ret = get_input_packet(ifile, &pkt);
2130 
2131  if (ret == AVERROR(EAGAIN)) {
2132  ifile->eagain = 1;
2133  return ret;
2134  }
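 /* A read error or end-of-file on this input: flush its decoders with a
  * NULL packet, finish the streamcopy and subtitle outputs fed from it, and
  * return EAGAIN so the transcode loop keeps reading the remaining inputs. */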
2135  if (ret < 0) {
2136  if (ret != AVERROR_EOF) {
2137  print_error(is->filename, ret);
2138  if (exit_on_error)
2139  exit(1);
2140  }
2141  ifile->eof_reached = 1;
2142 
2143  for (i = 0; i < ifile->nb_streams; i++) {
2144  ist = input_streams[ifile->ist_index + i];
2145  if (ist->decoding_needed)
2146  output_packet(ist, NULL);
2147 
2148  /* mark all outputs that don't go through lavfi as finished */
2149  for (j = 0; j < nb_output_streams; j++) {
2150  OutputStream *ost = output_streams[j];
2151 
2152  if (ost->source_index == ifile->ist_index + i &&
2153  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2154  finish_output_stream(ost);
2155  }
2156  }
2157 
2158  return AVERROR(EAGAIN);
2159  }
2160 
2161  reset_eagain();
2162 
2163  if (do_pkt_dump) {
2164  av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2165  is->streams[pkt.stream_index]);
2166  }
2167  /* the following test is needed in case new streams appear
2168  dynamically in the stream: we ignore them */
2169  if (pkt.stream_index >= ifile->nb_streams)
2170  goto discard_packet;
2171 
2172  ist = input_streams[ifile->ist_index + pkt.stream_index];
2173  if (ist->discard)
2174  goto discard_packet;
2175 
2176  if (pkt.dts != AV_NOPTS_VALUE)
2177  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2178  if (pkt.pts != AV_NOPTS_VALUE)
2179  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2180 
2181  if (pkt.pts != AV_NOPTS_VALUE)
2182  pkt.pts *= ist->ts_scale;
2183  if (pkt.dts != AV_NOPTS_VALUE)
2184  pkt.dts *= ist->ts_scale;
2185 
2186  if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2187  (is->iformat->flags & AVFMT_TS_DISCONT)) {
2188  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2189  int64_t delta = pkt_dts - ist->next_dts;
2190 
2191  if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2192  ifile->ts_offset -= delta;
2193  av_log(NULL, AV_LOG_DEBUG,
2194  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2195  delta, ifile->ts_offset);
2196  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2197  if (pkt.pts != AV_NOPTS_VALUE)
2198  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2199  }
2200  }
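 /* Illustration (the numbers here are made up): if earlier packets predicted
  * next_dts around 10 s but this packet's dts rescales to 100 s, delta is
  * about 90 s and exceeds dts_delta_threshold seconds, so ts_offset is
  * reduced by delta and this packet (plus all that follow from this file)
  * is shifted back onto the old timeline, keeping timestamps monotonic
  * across the discontinuity. */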
2201 
2202  ret = output_packet(ist, &pkt);
2203  if (ret < 0) {
2204  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2205  ist->file_index, ist->st->index);
2206  if (exit_on_error)
2207  exit(1);
2208  }
2209 
2210 discard_packet:
2211  av_free_packet(&pkt);
2212 
2213  return 0;
2214 }
2215 
2216 /*
2217  * The following code is the main loop of the file converter
2218  */
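/*
 * Roughly: while some output stream still wants data, read and route one
 * input packet (process_input), pull whatever frames became available out
 * of the filtergraphs (poll_filters), and print a progress report. Once
 * the inputs are exhausted, decoders, filters and encoders are flushed,
 * trailers are written and all codecs are closed.
 */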
2219 static int transcode(void)
2220 {
2221  int ret, i, need_input = 1;
2222  AVFormatContext *os;
2223  OutputStream *ost;
2224  InputStream *ist;
2225  int64_t timer_start;
2226 
2227  ret = transcode_init();
2228  if (ret < 0)
2229  goto fail;
2230 
2231  av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2232  term_init();
2233 
2234  timer_start = av_gettime();
2235 
2236 #if HAVE_PTHREADS
2237  if ((ret = init_input_threads()) < 0)
2238  goto fail;
2239 #endif
2240 
2241  while (!received_sigterm) {
2242  /* check if there's any stream where output is still needed */
2243  if (!need_output()) {
2244  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2245  break;
2246  }
2247 
2248  /* read and process one input packet if needed */
2249  if (need_input) {
2250  ret = process_input();
2251  if (ret == AVERROR_EOF)
2252  need_input = 0;
2253  }
2254 
2255  ret = poll_filters();
2256  if (ret < 0) {
2257  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
2258  continue;
2259 
2260  av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
2261  break;
2262  }
2263 
2264  /* dump report using the first output video and audio streams */
2265  print_report(0, timer_start);
2266  }
2267 #if HAVE_PTHREADS
2268  free_input_threads();
2269 #endif
2270 
2271  /* at the end of stream, we must flush the decoder buffers */
2272  for (i = 0; i < nb_input_streams; i++) {
2273  ist = input_streams[i];
2274  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2275  output_packet(ist, NULL);
2276  }
2277  }
2278  poll_filters();
2279  flush_encoders();
2280 
2281  term_exit();
2282 
2283  /* write the trailer if needed and close file */
2284  for (i = 0; i < nb_output_files; i++) {
2285  os = output_files[i]->ctx;
2286  av_write_trailer(os);
2287  }
2288 
2289  /* dump report by using the first video and audio streams */
2290  print_report(1, timer_start);
2291 
2292  /* close each encoder */
2293  for (i = 0; i < nb_output_streams; i++) {
2294  ost = output_streams[i];
2295  if (ost->encoding_needed) {
2296  av_freep(&ost->st->codec->stats_in);
2297  avcodec_close(ost->st->codec);
2298  }
2299  }
2300 
2301  /* close each decoder */
2302  for (i = 0; i < nb_input_streams; i++) {
2303  ist = input_streams[i];
2304  if (ist->decoding_needed) {
2305  avcodec_close(ist->st->codec);
2306  }
2307  }
2308 
2309  /* finished! */
2310  ret = 0;
2311 
2312  fail:
2313 #if HAVE_PTHREADS
2314  free_input_threads();
2315 #endif
2316 
2317  if (output_streams) {
2318  for (i = 0; i < nb_output_streams; i++) {
2319  ost = output_streams[i];
2320  if (ost) {
2321  if (ost->stream_copy)
2322  av_freep(&ost->st->codec->extradata);
2323  if (ost->logfile) {
2324  fclose(ost->logfile);
2325  ost->logfile = NULL;
2326  }
2327  av_freep(&ost->st->codec->subtitle_header);
2328  av_free(ost->forced_kf_pts);
2329  av_dict_free(&ost->opts);
2330  }
2331  }
2332  }
2333  return ret;
2334 }
2335 
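/* User CPU time consumed so far, in microseconds: taken from getrusage()
 * or GetProcessTimes() where available, otherwise approximated with the
 * wall clock from av_gettime(). */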
2336 static int64_t getutime(void)
2337 {
2338 #if HAVE_GETRUSAGE
2339  struct rusage rusage;
2340 
2341  getrusage(RUSAGE_SELF, &rusage);
2342  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2343 #elif HAVE_GETPROCESSTIMES
2344  HANDLE proc;
2345  FILETIME c, e, k, u;
2346  proc = GetCurrentProcess();
2347  GetProcessTimes(proc, &c, &e, &k, &u);
2348  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2349 #else
2350  return av_gettime();
2351 #endif
2352 }
2353 
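/* Rough peak memory use in bytes: ru_maxrss is assumed to be reported in
 * kilobytes (hence the multiplication by 1024), while the Windows branch
 * returns PeakPagefileUsage, i.e. peak committed memory rather than a true
 * resident-set size. Returns 0 when neither API is available. */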
2354 static int64_t getmaxrss(void)
2355 {
2356 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2357  struct rusage rusage;
2358  getrusage(RUSAGE_SELF, &rusage);
2359  return (int64_t)rusage.ru_maxrss * 1024;
2360 #elif HAVE_GETPROCESSMEMORYINFO
2361  HANDLE proc;
2362  PROCESS_MEMORY_COUNTERS memcounters;
2363  proc = GetCurrentProcess();
2364  memcounters.cb = sizeof(memcounters);
2365  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2366  return memcounters.PeakPagefileUsage;
2367 #else
2368  return 0;
2369 #endif
2370 }
2371 
2372 int main(int argc, char **argv)
2373 {
2374  int ret;
2375  int64_t ti;
2376 
2377  atexit(exit_program);
2378 
2379  av_log_set_flags(AV_LOG_SKIP_REPEATED);
2380  parse_loglevel(argc, argv, options);
2381 
2382  avcodec_register_all();
2383 #if CONFIG_AVDEVICE
2384  avdevice_register_all();
2385 #endif
2386  avfilter_register_all();
2387  av_register_all();
2388  avformat_network_init();
2389 
2390  show_banner();
2391 
2392  /* parse options and open all input/output files */
2393  ret = avconv_parse_options(argc, argv);
2394  if (ret < 0)
2395  exit(1);
2396 
2397  if (nb_output_files <= 0 && nb_input_files == 0) {
2398  show_usage();
2399  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2400  exit(1);
2401  }
2402 
2403  /* file converter / grab */
2404  if (nb_output_files <= 0) {
2405  fprintf(stderr, "At least one output file must be specified\n");
2406  exit(1);
2407  }
2408 
2409  ti = getutime();
2410  if (transcode() < 0)
2411  exit(1);
2412  ti = getutime() - ti;
2413  if (do_benchmark) {
2414  int maxrss = getmaxrss() / 1024;
2415  printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
2416  }
2417 
2418  exit(0);
2419  return 0;
2420 }