[Libav-user] ffmpeg encoding example
Gonzalo Garramuno
ggarra13 at gmail.com
Mon Sep 17 01:40:56 CEST 2012
I have tried muxing.c with some modifications and I get a movie file but
that starts at a timestamp other than 0, frame 3 or 5 to be precise of
24fps, depending codec, and lacks the last 3-5 frames fed into it.
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count = 0, video_outbuf_size;
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int
height)
{
AVFrame *picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = (uint8_t*)av_malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
pix_fmt, width, height);
return picture;
}
static bool open_video(AVFormatContext *oc, AVStream *st,
const CMedia* img )
{
AVCodecContext* c = st->codec;
/* find the video encoder */
AVCodec* codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
LOG_ERROR( _("Video codec not found") );
return false;
}
/* open the codec */
if (avcodec_open2(c, codec, NULL) < 0) {
LOG_ERROR( _("Could not open video codec") );
return false;
}
video_outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
/* Allocate output buffer. */
/* XXX: API change will be done. */
/* Buffers passed into lav* can be allocated any way you prefer,
* as long as they're aligned enough for the architecture, and
* they're freed appropriately (such as using av_free for buffers
* allocated with av_malloc). */
video_outbuf_size = 2048*2048*3;
video_outbuf = (uint8_t*)av_malloc(video_outbuf_size);
}
/* Allocate the encoded raw picture. */
picture = alloc_picture(c->pix_fmt, img->width(), img->height());
if (!picture) {
LOG_ERROR( _("Could not allocate picture") );
return false;
}
return true;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(picture->data[0]);
av_free(picture);
av_free(video_outbuf);
}
/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, const CMedia* img )
{
CMedia* m = (CMedia*) img;
image_type_ptr hires = img->hires();
unsigned w = img->width();
unsigned h = img->height();
for ( unsigned y = 0; y < h; ++y )
{
for ( unsigned x = 0; x < w; ++x )
{
ImagePixel p = hires->pixel( x, y );
if ( img->gamma() != 1.0f )
{
float gamma = 1.0f/img->gamma();
p.r = powf( p.r, gamma );
p.g = powf( p.g, gamma );
p.b = powf( p.b, gamma );
if (p.r < 0.0f) p.r = 0.0f;
if (p.g < 0.0f) p.g = 0.0f;
if (p.b < 0.0f) p.b = 0.0f;
if (p.r > 1.0f) p.r = 1.0f;
if (p.g > 1.0f) p.g = 1.0f;
if (p.b > 1.0f) p.b = 1.0f;
}
ImagePixel yuv = color::rgb::to_ITU601( p );
pict->data[0][y * pict->linesize[0] + x ] = yuv.r;
unsigned x2 = x / 2;
unsigned y2 = y / 2;
pict->data[1][y2 * pict->linesize[1] + x2 ] = yuv.g;
pict->data[2][y2 * pict->linesize[2] + x2 ] = yuv.b;
}
}
}
static bool write_video_frame(AVFormatContext* oc, AVStream* st,
const CMedia* img )
{
int out_size, ret;
AVCodecContext *c = NULL;
c = st->codec;
if (frame_count >= img->last_frame() - img->first_frame() + 1) {
/* No more frames to compress. The codec has a latency of a few
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
} else {
fill_yuv_image( picture, img );
}
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = video_outbuf;
pkt.size = video_outbuf_size;
int got_pic = 0;
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, picture, &got_pic);
if (!ret && got_pic && c->coded_frame) {
c->coded_frame->pts = pkt.pts;
c->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
}
/* free any side data since we cannot return it */
if (pkt.side_data_elems > 0) {
int i;
for (i = 0; i < pkt.side_data_elems; i++)
av_free(pkt.side_data[i].data);
av_freep(&pkt.side_data);
pkt.side_data_elems = 0;
}
/* If size is zero, it means the image was buffered. */
ret = ret ? ret : pkt.size;
if (c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(c->coded_frame->pts,
c->time_base, st->time_base);
if (c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
if (ret != 0) {
LOG_ERROR( _("Error while writing video frame") );
return false;
}
if ( frame_count >= img->last_frame() - img->first_frame() + 1 )
{
cerr << "got pic" << endl;
for (got_pic = 1; got_pic; ++frame_count) {
ret = avcodec_encode_video2(c, &pkt, NULL, &got_pic);
if (ret < 0) {
LOG_ERROR( "error encoding video frame");
break;
}
if (got_pic) {
av_free_packet(&pkt);
}
}
}
frame_count++;
return true;
}
static AVStream *add_video_stream(AVFormatContext *oc,
AVCodec** codec,
enum CodecID codec_id,
const CMedia* img )
{
/* find the video encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
LOG_ERROR( _( "Video codec not found") );
return NULL;
}
AVStream* st = avformat_new_stream(oc, NULL);
if (!st) {
LOG_ERROR( _("Could not alloc stream") );
return NULL;
}
AVCodecContext* c = st->codec;
avcodec_get_context_defaults3(c, *codec);
c->codec_id = codec_id;
/* resolution must be a multiple of two */
c->width = (img->width() / 2) * 2;
c->height = (img->height() / 2) * 2;
/* put sample parameters */
c->bit_rate = c->width * c->height * 3;
c->bit_rate_tolerance = 5000000;
c->global_quality = 1;
c->compression_level = FF_COMPRESSION_DEFAULT;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
c->time_base.den = img->fps();
c->time_base.num = 1;
c->ticks_per_frame = 2;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
c->max_b_frames = 1;
c->me_method = 5;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
// c->b_quant_factor = 1;
// c->b_quant_offset = 0.0f;
// c->mpeg_quant = 0;
// c->i_quant_factor = 1.0f;
// c->i_quant_offset = 0.0f;
// c->p_masking = 0.0f;
// c->dark_masking = 0.0f;
// c->me_cmp = 7;
// c->me_sub_cmp = 7;
// c->ildct_cmp = FF_CMP_SSE;
// c->last_predictor_count = 2;
// c->pre_me = 7;
// c->me_pre_cmp = 7;
// c->pre_dia_size = 8;
// c->me_subpel_quality = 2;
// c->me_range = 0;
// c->intra_quant_bias = FF_DEFAULT_QUANT_BIAS;
// c->inter_quant_bias = FF_DEFAULT_QUANT_BIAS;
// c->mb_decision = FF_MB_DECISION_RD;
// c->me_threshold = 8;
// c->mb_threshold = 8;
// c->intra_dc_precision = 1;
// c->keyint_min = 4;
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
static AVFormatContext *oc = NULL;
static AVOutputFormat* fmt = NULL;
static AVStream* audio_st = NULL, *video_st = NULL;
bool aviImage::open_movie( const char* filename, const CMedia* img )
{
int ret = 0;
int i;
frame_count = 0;
av_register_all();
if ( oc == NULL )
{
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
if (!oc) {
LOG_INFO( _("Could not deduce output format from file extension: using
MPEG.") );
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
fmt = oc->oformat;
video_st = NULL;
audio_st = NULL;
if (fmt->video_codec != CODEC_ID_NONE) {
video_st = add_video_stream(oc, &video_codec, fmt->video_codec, img);
}
if (img->has_audio() && fmt->audio_codec != CODEC_ID_NONE) {
audio_st = add_audio_stream(oc, &audio_cdc, fmt->audio_codec);
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (video_st)
if ( ! open_video(oc, video_st, img) )
return false;
if (audio_st)
if ( ! open_audio_static(oc, audio_cdc, audio_st) )
{
audio_st = NULL;
if ( !video_st ) return false;
}
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
LOG_ERROR( _("Could not open '") << filename << "'" );
return false;
}
}
picture->pts = 0;
/* Write the stream header, if any. */
avformat_write_header(oc, NULL);
}
return true;
}
bool aviImage::save_movie_frame( const CMedia* img )
{
double audio_pts, video_pts;
if (audio_st)
audio_pts = ((double)audio_st->pts.val * audio_st->time_base.num /
audio_st->time_base.den);
else
audio_pts = 0.0;
if (video_st)
video_pts = ((double)video_st->pts.val * video_st->time_base.num /
video_st->time_base.den);
/* write interleaved audio and video frames */
if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
write_audio_frame(oc, audio_st, img);
} else {
write_video_frame(oc, video_st, img);
picture->pts++;
}
}
bool aviImage::close_movie()
{
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(oc);
/* Close each codec. */
if (video_st)
close_video(oc, video_st);
if (audio_st)
close_audio_static(oc, audio_st);
/* Free the streams. */
for (int i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}
if (!(fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_close(oc->pb);
/* free the stream */
av_free(oc);
oc = NULL;
return true;
}
void save_sequence_file( CMedia* img, const mrv::ViewerUI* uiMain,
const char* startdir)
{
if (!img) return;
const char* file = flu_save_chooser("Save Sequence",
kIMAGE_PATTERN.c_str(), startdir);
if ( !file ) return;
std::string tmp = file;
std::transform( tmp.begin(), tmp.end(), tmp.begin(),
(int(*)(int)) tolower);
std::string ext = tmp.c_str() + tmp.size() - 4;
bool movie = false;
if ( ext == ".avi" || ext == ".mov" || ext == ".mp4" || ext == ".wmv" )
{
movie = true;
}
std::string root, fileseq = file;
bool ok = mrv::fileroot( root, fileseq );
if ( !ok && !movie ) return;
if ( movie )
{
root = root.substr( 0, root.size() - 4 );
}
fltk::ProgressBar* progress = NULL;
fltk::Window* main = (fltk::Window*)uiMain->uiMain;
fltk::Window* w = new fltk::Window( main->x(), main->y() +
main->h()/2,
main->w(), 80 );
w->child_of(main);
w->begin();
mrv::Timeline* timeline = uiMain->uiTimeline;
int64_t first = timeline->minimum();
int64_t last = timeline->maximum();
progress = new fltk::ProgressBar( 0, 20, w->w(), w->h()-20 );
progress->range( 0, last - first + 1 );
progress->align( fltk::ALIGN_TOP );
char title[1024];
sprintf( title, "Saving Sequence %" PRId64 " - %" PRId64,
first, last );
progress->label( title );
progress->showtext(true);
w->end();
w->show();
fltk::check();
int64_t dts = first;
int64_t frame = first;
int64_t failed_frame = frame-1;
const char* fileroot = root.c_str();
mrv::media old;
bool open_movie = false;
int movie_count = 1;
bool edl = uiMain->uiTimeline->edl();
for ( ; frame <= last; ++frame )
{
int step = 1;
uiMain->uiReelWindow->uiBrowser->seek( frame );
mrv::media fg = uiMain->uiView->foreground();
if (!fg) break;
CMedia* img = fg->image();
if ( old != fg )
{
old = fg;
if ( open_movie )
{
aviImage::close_movie();
open_movie = false;
}
if ( movie )
{
char buf[256];
if ( edl )
{
sprintf( buf, "%s%d%s", root.c_str(), movie_count,
ext.c_str() );
}
else
{
sprintf( buf, "%s%s", root.c_str(), ext.c_str() );
}
if ( fs::exists( buf ) )
{
int ok = fltk::ask( "Do you want to replace '%s'",
buf );
if (!ok)
{
break;
}
}
if ( aviImage::open_movie( buf, img ) )
{
open_movie = true;
++movie_count;
}
}
}
{
if (movie)
{
aviImage::save_movie_frame( img );
}
else
{
char buf[1024];
sprintf( buf, fileroot, frame );
img->save( buf );
}
}
progress->step(1);
fltk::check();
if ( !w->visible() ) {
break;
}
}
if ( open_movie )
{
aviImage::close_movie();
open_movie = false;
}
if ( w )
{
w->hide();
w->destroy();
}
}
--
Gonzalo Garramuño
ggarra13 at gmail.com
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://ffmpeg.org/pipermail/libav-user/attachments/20120916/71d7b199/attachment.html>
More information about the Libav-user
mailing list