FFmpeg 3.2.10
muxing.c
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */
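
/*
 * Build note (an assumption, not part of the original example): when compiled
 * standalone, this file needs to link against the libraries whose headers are
 * included below. The exact flags depend on your FFmpeg installation; the
 * pkg-config route is usually the safest, e.g. something like:
 *
 *   gcc muxing.c -o muxing \
 *       $(pkg-config --cflags --libs libavformat libavcodec libswresample libswscale libavutil) -lm
 */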

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}
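
/*
 * Worked example of the rescaling above (numbers are illustrative): a video
 * frame with pts = 3 in the encoder time base 1/25 represents t = 0.12 s.
 * If the muxer switched the stream time base to 1/90000 when the header was
 * written (as MPEG-based muxers do), av_packet_rescale_ts() rewrites the
 * timestamp as 3 * (90000 / 25) = 10800, i.e. the same 0.12 s expressed in
 * stream units.
 */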

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams - 1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
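
/*
 * Aside (an assumption, not part of the original example): add_stream() always
 * asks for the container's default codec ID. If you want a specific encoder
 * instead, you could look it up by name first, e.g.
 *
 *     AVCodec *codec = avcodec_find_encoder_by_name("libx264");
 *
 * and pass codec->id here; whether any particular encoder is available depends
 * on how your FFmpeg build was configured.
 */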

/**************************************************************/
/* audio output */

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
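
/*
 * Note on the resampler setup above: input and output use the same channel
 * count and sample rate, so swr_convert() effectively only performs a
 * sample-format conversion, from the interleaved signed 16-bit samples the
 * generator produces (AV_SAMPLE_FMT_S16) to whatever sample format the
 * encoder selected (e.g. planar float for FFmpeg's native AAC encoder).
 */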

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;

    return frame;
}
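
/*
 * The generator above produces a rising sine sweep. With the values set in
 * open_audio() and a 44100 Hz sample rate, the per-sample phase increment
 * starts at 2 * pi * 110 / 44100 (a 110 Hz tone) and itself grows by
 * 2 * pi * 110 / 44100 / 44100 per sample; over one second (44100 samples)
 * that adds 2 * pi * 110 / 44100 to the increment, i.e. the pitch climbs by
 * roughly 110 Hz every second of generated audio.
 */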

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->enc;

    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    return (frame || got_packet) ? 0 : 1;
}
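
/*
 * Once get_audio_frame() starts returning NULL (after STREAM_DURATION
 * seconds), avcodec_encode_audio2() is called with a NULL frame, which drains
 * any packets the encoder still has buffered. The function only reports
 * "finished" (returns 1) once there is neither a new frame nor a drained
 * packet left.
 */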

/**************************************************************/
/* video output */

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx,
                  (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
                  0, c->height, ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = { 0 };

    c = ost->enc;

    frame = get_video_frame(ost);

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }
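
    /*
     * Illustration of the interleaving choice above (numbers assumed):
     * av_compare_ts() compares the two next_pts values after converting both
     * to seconds. With video next_pts = 25 in a 1/25 time base (1.00 s) and
     * audio next_pts = 43008 in a 1/44100 time base (~0.975 s), audio is
     * behind, so an audio frame is encoded next; ties go to video because of
     * the <= 0 test.
     */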

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}