[Libav-user] Using the latest ffmpeg-libs on Fedora 24
Sébastien Le Roux
sebastien.leroux at ipcms.unistra.fr
Tue Nov 29 13:08:06 EET 2016
Dear all,
first thanks for reading this message, I hope that I found the
appropriate place to ask for advise.
I wrote/am writing a program with an OpenGL interface as well as an
interface to encode movies
using the ffmpeg libraries, so far everything worked nicely and I was
able to encode movie from
my OpenGL window (mpeg, mpeg4, h264, theora and flash) but recently I
updated the lib my Fedora
to use the latest lib available (ffmpeg-libs-3.1.5-1.fc24.x86_64,
ffmpeg-3.1.5-1.fc24.x86_64, ffmpeg-devel-3.1.5-1.fc24.x86_64)
when compiling the program I received many warning about function being
depreciated, and I try
to update to code, unsuccessfully so far, that is why I am sending this
email today.
You will find two pieces of code attached to this message:
- (1) old-movie.c
- (2) new-movie.c
(1) is the old version of my code, that send many warning when compiling
with the new lib, but work great
nevertheless, I can encode mpeg, mpeg4, h264, theora and flash movie
perfectly ... well the code
might not be perfect I learned by my-self after all but the results are
just fine.
(2) is the new version of my code, no warning at compilation time
(expect for the 'avcodec_encode_video2'
but so far I could not get rid of this instruction), but It woks poorly,
I can only encode mpeg and mpeg4
movie, the h264 file is empty after the job, and the theora encoding
results in a fatal error that kills the
program ... not what I whish for obviously ...
I do not understand what I am missing, I tried to used all the examples
provide by the "https://ffmpeg.org/doxygen"$
pages but so far ... well not so good ...
I would really appreciate any help to understand what I do wrong here,
so thanks in advance for your lights !
Not to mention that I would gladly take any advise to improve my code,
in case something was wrong anyway.
Best regards
Sébastien Le Roux
--
===========================================================
Dr. Sébastien Le Roux
Ingénieur de Recherche CNRS
Institut de Physique et Chimie des Matériaux de Strasbourg
Département des Matériaux Organiques
23, rue du Loess
BP 43
F-67034 Strasbourg Cedex 2, France
E-mail: sebastien.leroux at ipcms.unistra.fr
Webpage: http://www.ipcms.unistra.fr/?page_id=14965&lang=en
RINGS project: http://rings-code.sourceforge.net/
ISAACS project: http://isaacs.sourceforge.net/
Fax: +33 3 88 10 72 46
Phone: +33 3 88 10 71 58
===========================================================
-------------- next part --------------
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#if LIBAVCODEC_VERSION_MAJOR > 54
#include <libavutil/imgutils.h>
#endif
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include "global.h"
#include "interface.h"
#include "project.h"
#include "glwindow.h"
#include "glview.h"
#include "movie.h"
#if LIBAVCODEC_VERSION_MAJOR < 56
# define PIXEL_FORMAT PIX_FMT_YUV420P
#else
# define PIXEL_FORMAT AV_PIX_FMT_YUV420P
#endif
#define AVS_FRAME_ALIGN 16
#define RGB_TO_Y(pixels, loc) (0.29900 * pixels[loc] + 0.58700 * pixels[loc+1] + 0.11400 * pixels[loc+2])
#define RGB_TO_U(pixels, loc)(-0.16874 * pixels[loc] - 0.33126 * pixels[loc+1] + 0.50000 * pixels[loc+2]+128.0)
#define RGB_TO_V(pixels, loc) (0.50000 * pixels[loc] - 0.41869 * pixels[loc+1] - 0.08131 * pixels[loc+2]+128.0)
#define AVIO_FLAG_READ 1
#define AVIO_FLAG_WRITE 2
#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE)
#ifndef AV_ROUND_PASS_MINMAX
#define AV_ROUND_PASS_MINMAX 8192
#endif
#ifndef URL_WRONLY
# define URL_WRONLY 1
#endif
char * codec_name[VIDEO_CODECS] = {"MPEG-1/2",
"MPEG-4",
"H264",
"Theora",
"Flash"};
char * codec_list[VIDEO_CODECS] = {"mpeg",
"avi",
"mkv",
"ogv",
"flv"};
#if LIBAVCODEC_VERSION_MAJOR > 54
int codec_id[VIDEO_CODECS] = {AV_CODEC_ID_MPEG2VIDEO,
AV_CODEC_ID_MPEG4,
AV_CODEC_ID_H264,
AV_CODEC_ID_THEORA,
AV_CODEC_ID_FLV1};
#else
int codec_id[VIDEO_CODECS] = {CODEC_ID_MPEG2VIDEO,
CODEC_ID_MPEG4,
CODEC_ID_H264,
CODEC_ID_THEORA,
CODEC_ID_FLV1};
#endif
static struct SwsContext * sws_context = NULL;
GdkPixbuf * pixbuf;
uint8_t * video_outbuf;
int video_outbuf_size;
int num_frames;
void convert_rgb_pixbuf_to_yuv (GdkPixbuf * pixbuf, AVFrame * picture, int w, int h)
{
gint x, y, location, location2;
gint inner_x, inner_y, half_location;
gfloat cr, cb;
gint pixbuf_xsize, pixbuf_ysize;
guchar * pixels;
gint row_stride;
gboolean x_odd, y_odd;
pixbuf_xsize = gdk_pixbuf_get_width (pixbuf);
pixbuf_ysize = gdk_pixbuf_get_height (pixbuf);
pixels = gdk_pixbuf_get_pixels (pixbuf);
row_stride = gdk_pixbuf_get_rowstride (pixbuf);
y_odd = (pixbuf_ysize & 0x1);
x_odd = (pixbuf_xsize & 0x1);
/* note, the Cr and Cb info is subsampled by 2x2 */
for (y=0; y<pixbuf_ysize-1; y+=2)
{
for (x=0; x<pixbuf_xsize-1; x+=2)
{
cb = 0.0;
cr = 0.0;
for (inner_y = y; inner_y < y+2; inner_y++)
for (inner_x = x; inner_x < x+2; inner_x++)
{
location = inner_y*row_stride+3*inner_x;
picture -> data[0][inner_x+inner_y*w] = RGB_TO_Y (pixels, location);
cb += RGB_TO_U (pixels, location);
cr += RGB_TO_V (pixels, location);
}
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = cb/4.0;
picture -> data[2][half_location] = cr/4.0;
}
if (x_odd)
{
location = y*row_stride+3*x;
location2 = (y+1)*row_stride+3*x;
picture -> data[0][x+y*w] = RGB_TO_Y (pixels, location);
picture -> data[0][x+1+y*w] = 0;
picture -> data[0][x+(y+1)*w] = RGB_TO_Y (pixels, location2);
picture -> data[0][x+1+(y+1)*w] = 0;
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = (RGB_TO_U(pixels, location) + RGB_TO_U(pixels, location2)+256)/4.0;
picture -> data[2][half_location] = (RGB_TO_V(pixels, location) + RGB_TO_V(pixels, location2)+256)/4.0;
}
}
if (y_odd)
{
for (x=0; x<pixbuf_xsize-1; x+=2)
{
location = y*row_stride+3*x;
location2 = y*row_stride+3*(x+1);
picture -> data[0][x+y*w] = RGB_TO_Y(pixels, location);
picture -> data[0][x+1+y*w] = RGB_TO_Y(pixels, location2);
picture -> data[0][x+(y+1)*w] = 0;
picture -> data[0][x+1+(y+1)*w] = 0;
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = (RGB_TO_U(pixels, location)+RGB_TO_U(pixels, location2)+256)/4.0;
picture -> data[2][half_location] = (RGB_TO_V(pixels, location)+RGB_TO_V(pixels, location2)+256)/4.0;
}
if (x_odd)
{
location = y*row_stride+3*x;
picture -> data[0][x+y*w] = RGB_TO_Y(pixels, location);
picture -> data[0][x+1+y*w] = 0;
picture -> data[0][x+(y+1)*w] = 0;
picture -> data[0][x+1+(y+1)*w] = 0;
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = (RGB_TO_U(pixels, location)+384)/4.0;
picture -> data[2][half_location] = (RGB_TO_V(pixels, location)+384)/4.0;
}
}
}
static void ffmpeg_encoder_set_frame_yuv_from_rgb (uint8_t * rgb, AVFrame * frame, int width, int height)
{
const int in_linesize = 4*width;
sws_context = sws_getCachedContext (sws_context,
width, height, AV_PIX_FMT_BGRA,
width, height, PIXEL_FORMAT,
0, NULL, NULL, NULL);
sws_scale (sws_context, (const uint8_t * const *)&rgb, & in_linesize, 0, height, frame -> data, frame -> linesize);
}
static GLubyte * read_opengl_image (gboolean movie, unsigned int width, unsigned int height)
{
size_t i, nvals;
nvals = width * height * 4;
GLubyte * pixels = malloc (nvals * sizeof(GLubyte));
GLubyte * rgb = malloc (nvals * sizeof(GLubyte));
glReadPixels (0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, pixels);
// Flip data veritcally
for (i = 0; i < height; i++)
{
memcpy (rgb + 4 * width * i, pixels + 4 * width * (height - i - 1), 4 * width);
}
free (pixels);
return rgb;
}
void fill_image (AVFrame * frame, int width, int height, gboolean movie, glwin * view)
{
// opengl call is here !!!
reshape (view, width, height);
draw (view);
GLubyte * image = read_opengl_image (movie, width, height);
if (movie)
{
ffmpeg_encoder_set_frame_yuv_from_rgb (image, frame, width, height);
}
else
{
cairo_surface_t * surf = cairo_image_surface_create_for_data ((guchar *)image, CAIRO_FORMAT_ARGB32,
width, height, cairo_format_stride_for_width(CAIRO_FORMAT_ARGB32, width));
pixbuf = convert_to_pixbuf (surf);
cairo_surface_destroy (surf);
}
free (image);
}
static void write_video_frame (AVFormatContext * f_context,
AVCodecContext * c_context, AVStream * stream,
AVFrame * frame, int frame_id, glwin * view)
{
int out_size;
int got_frame = 1;
fill_image (frame, c_context -> width, c_context -> height, TRUE, view);
AVPacket packet;
av_init_packet (& packet);
packet.data = NULL;
packet.size = 0;
frame -> pts = frame_id;
out_size = avcodec_encode_video2 (c_context, & packet, frame, & got_frame);
// The following does not work, I found nothing else but to stick the "outdated" avcodec_encode_video2
//got_frame = 1;
//out_size = avcodec_send_frame (c_context, frame);
if (out_size < 0)
{
// "Error while encoding video frame"
g_warning ("MOVIE_ENCODING:: VIDEO_FRAME:: error:: %s", av_err2str (out_size));
}
else
{
if (got_frame == 0)
{
// "Error while encoding video frame"
g_warning ("MOVIE_ENCODING:: VIDEO_FRAME:: warning:: frame ignored, packet empty, frame= %d", frame_id);
}
else
{
packet.stream_index = stream -> index;
packet.dts = av_rescale_q_rnd (packet.dts,
c_context -> time_base,
stream -> time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.pts = av_rescale_q_rnd (packet.pts,
c_context -> time_base,
stream -> time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.duration = av_rescale_q (packet.duration,
c_context -> time_base,
stream -> time_base);
packet.flags |= AV_PKT_FLAG_KEY;
out_size = av_interleaved_write_frame (f_context, & packet);
if (out_size != 0)
{
// "Error while writting video frame"
g_warning ("MOVIE_ENCODING:: VIDEO_FRAME:: error:: %s", av_err2str(out_size));
}
}
}
}
static AVFrame * alloc_video_frame (int width, int height)
{
AVFrame * frame;
frame = av_frame_alloc ();
if (! frame)
{
return NULL;
}
frame -> format = PIXEL_FORMAT;
frame -> width = width;
frame -> height = height;
if (av_image_alloc (frame -> data, frame -> linesize, frame -> width, frame -> height, frame -> format, 32) < 0)
{
g_warning ("MOVIE_ENCODING:: Could not allocate picture buffer");
return NULL;
}
return frame;
}
AVStream * add_video_stream (AVFormatContext * format,
AVCodec * codec,
AVCodecContext * context,
int framesec,
int extraframes,
int bitrate,
int video_res[2])
{
AVStream * st;
st = avformat_new_stream (format, codec);
if (! st)
{
g_warning ("MOVIE_ENCODING:: Could not create video stream");
return NULL;
}
/* put sample parameters */
context -> bit_rate_tolerance = bitrate*1000;
context -> bit_rate = bitrate*1000;
/* resolution must be a multiple of two */
context -> width = video_res[0];
context -> height = video_res[1];
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
context -> time_base.den = framesec; /* 25 images/s */
context -> time_base.num = 1;
context -> gop_size = extraframes; /* emit one intra frame every twelve frames at most */
context -> pix_fmt = PIXEL_FORMAT;
/*
Some container formats (like MP4) require global headers to be present
Mark the encoder so that it behaves accordingly.
*/
if (format -> oformat -> flags & AVFMT_GLOBALHEADER)
{
context -> flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (avcodec_parameters_from_context (st -> codecpar, context) < 0)
{
g_debug ("MOVIE_ENCODING:: Failed to copy codec parameters to video stream");
return NULL;
}
return st;
}
gboolean create_movie (glwin * view,
GtkProgressBar * bar,
int framesec,
int extraframes,
int codec,
int oglquality,
int bitrate,
int video_res[2],
gchar * videofile)
{
int i, q;
AVCodec * av_codec = NULL;
AVFormatContext * format_context = NULL;
AVCodecContext * av_codec_context = NULL;
AVStream * av_stream = NULL;
int error;
#ifdef DEBUG
g_debug ("VIDEO ENCODING:: frames per seconds:: %d", framesec);
g_debug ("VIDEO ENCODING:: extra frames every:: %d frame(s)", extraframes);
g_debug ("VIDEO ENCODING:: birate:: %d", bitrate);
g_debug ("VIDEO ENCODING:: video_x = %d , video_y = %d", video_res[0], video_res[1]);
g_debug ("VIDEO ENCODING:: codec:: %d, name= %s, ext= %s", codec, codec_name[codec], codec_list[codec]);
#endif // DEBUG
num_frames = view -> anim -> frames;
av_register_all ();
avcodec_register_all ();
if (! (format_context = avformat_alloc_context()))
{
g_warning ("MOVIE_ENCODING:: Could not allocate AV format context");
return FALSE;
}
// Guess the desired container format based on file extension
if (! (format_context -> oformat = av_guess_format (NULL, videofile, NULL)))
{
g_warning ("MOVIE_ENCODING:: Could not deduce container format: please change file name");
return FALSE;
}
if (! (av_codec = avcodec_find_encoder (codec_id[codec])))
{
g_warning ("MOVIE_ENCODING:: Could not find codec:: %s", codec_name[codec]);
return FALSE;
}
if (! (av_codec_context = avcodec_alloc_context3(av_codec)))
{
g_warning ("MOVIE_ENCODING:: Could not allocate an encoding context");
return FALSE;
}
av_stream = add_video_stream (format_context,
av_codec,
av_codec_context,
framesec,
extraframes,
bitrate,
video_res);
if (! av_stream)
{
// Error impossible to allocate stream
g_warning ("MOVIE_ENCODING:: could not create video stream");
return FALSE;
}
if (codec_id[codec] == AV_CODEC_ID_H264) av_opt_set (av_stream -> priv_data, "preset", "slow", 0);
/* open the codec */
if ((error = avcodec_open2 (av_codec_context, av_codec, NULL)) < 0)
{
// Can not open codec
g_warning ("MOVIE_ENCODING:: could not open codec, error= %s", av_err2str(error));
return FALSE;
}
AVFrame * picture = alloc_video_frame (av_codec_context -> width,
av_codec_context -> height);
av_dump_format (format_context, 0, videofile, 1);
if (avio_open (& format_context -> pb, videofile, AVIO_FLAG_WRITE) < 0)
{
// error impossible to open output file
g_warning ("MOVIE_ENCODING:: Impossible to open the video file '%s'", videofile);
return FALSE;
}
if (avformat_write_header (format_context, NULL) < 0)
{
g_warning ("MOVIE_ENCODING:: Impossible to write the AV format header");
return FALSE;
}
// View is the name of a structure that contains the data I want to render
view -> anim -> last = view -> anim -> first;
int frame_id;
for (frame_id = 0; frame_id < num_frames; frame_id ++)
{
write_video_frame (format_context, av_codec_context, av_stream, picture, frame_id, view);
if (frame_id < num_frames-1) view -> anim -> last = view -> anim -> last -> next;
}
av_write_trailer (format_context);
avcodec_close (av_codec_context);
/* free the streams */
for (i=0; i<format_context -> nb_streams; i++)
{
av_freep (& format_context -> streams[i]);
}
av_freep (& picture -> data[0]);
av_frame_free (& picture);
if (!(format_context -> flags & AVFMT_NOFILE))
{
/* close the output file */
avio_close (format_context -> pb);
}
/* free the stream */
av_free (format_context);
return TRUE;
}
-------------- next part --------------
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#if LIBAVCODEC_VERSION_MAJOR > 54
#include <libavutil/imgutils.h>
#endif
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include "global.h"
#include "interface.h"
#include "project.h"
#include "glwindow.h"
#include "glview.h"
#include "movie.h"
#if LIBAVCODEC_VERSION_MAJOR < 56
# define PIXEL_FORMAT PIX_FMT_YUV420P
#else
# define PIXEL_FORMAT AV_PIX_FMT_YUV420P
#endif
#define AVS_FRAME_ALIGN 16
#define RGB_TO_Y(pixels, loc) (0.29900 * pixels[loc] + 0.58700 * pixels[loc+1] + 0.11400 * pixels[loc+2])
#define RGB_TO_U(pixels, loc)(-0.16874 * pixels[loc] - 0.33126 * pixels[loc+1] + 0.50000 * pixels[loc+2]+128.0)
#define RGB_TO_V(pixels, loc) (0.50000 * pixels[loc] - 0.41869 * pixels[loc+1] - 0.08131 * pixels[loc+2]+128.0)
#define AVIO_FLAG_READ 1
#define AVIO_FLAG_WRITE 2
#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE)
#ifndef AV_ROUND_PASS_MINMAX
#define AV_ROUND_PASS_MINMAX 8192
#endif
#ifndef URL_WRONLY
# define URL_WRONLY 1
#endif
char * codec_name[VIDEO_CODECS] = {"MPEG-1/2",
"MPEG-4",
"H264",
"Theora",
"Flash"};
char * codec_list[VIDEO_CODECS] = {"mpeg",
"avi",
"mkv",
"ogv",
"flv"};
#if LIBAVCODEC_VERSION_MAJOR > 54
int codec_id[VIDEO_CODECS] = {AV_CODEC_ID_MPEG2VIDEO,
AV_CODEC_ID_MPEG4,
AV_CODEC_ID_H264,
AV_CODEC_ID_THEORA,
AV_CODEC_ID_FLV1};
#else
int codec_id[VIDEO_CODECS] = {CODEC_ID_MPEG2VIDEO,
CODEC_ID_MPEG4,
CODEC_ID_H264,
CODEC_ID_THEORA,
CODEC_ID_FLV1};
#endif
static struct SwsContext * sws_context = NULL;
GdkPixbuf * pixbuf;
uint8_t * video_outbuf;
int video_outbuf_size;
int num_frames;
void convert_rgb_pixbuf_to_yuv (GdkPixbuf * pixbuf, AVFrame * picture, int w, int h)
{
gint x, y, location, location2;
gint inner_x, inner_y, half_location;
gfloat cr, cb;
gint pixbuf_xsize, pixbuf_ysize;
guchar * pixels;
gint row_stride;
gboolean x_odd, y_odd;
pixbuf_xsize = gdk_pixbuf_get_width (pixbuf);
pixbuf_ysize = gdk_pixbuf_get_height (pixbuf);
pixels = gdk_pixbuf_get_pixels (pixbuf);
row_stride = gdk_pixbuf_get_rowstride (pixbuf);
y_odd = (pixbuf_ysize & 0x1);
x_odd = (pixbuf_xsize & 0x1);
/* note, the Cr and Cb info is subsampled by 2x2 */
for (y=0; y<pixbuf_ysize-1; y+=2)
{
for (x=0; x<pixbuf_xsize-1; x+=2)
{
cb = 0.0;
cr = 0.0;
for (inner_y = y; inner_y < y+2; inner_y++)
for (inner_x = x; inner_x < x+2; inner_x++)
{
location = inner_y*row_stride+3*inner_x;
picture -> data[0][inner_x+inner_y*w] = RGB_TO_Y (pixels, location);
cb += RGB_TO_U (pixels, location);
cr += RGB_TO_V (pixels, location);
}
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = cb/4.0;
picture -> data[2][half_location] = cr/4.0;
}
if (x_odd)
{
location = y*row_stride+3*x;
location2 = (y+1)*row_stride+3*x;
picture -> data[0][x+y*w] = RGB_TO_Y (pixels, location);
picture -> data[0][x+1+y*w] = 0;
picture -> data[0][x+(y+1)*w] = RGB_TO_Y (pixels, location2);
picture -> data[0][x+1+(y+1)*w] = 0;
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = (RGB_TO_U(pixels, location) + RGB_TO_U(pixels, location2)+256)/4.0;
picture -> data[2][half_location] = (RGB_TO_V(pixels, location) + RGB_TO_V(pixels, location2)+256)/4.0;
}
}
if (y_odd)
{
for (x=0; x<pixbuf_xsize-1; x+=2)
{
location = y*row_stride+3*x;
location2 = y*row_stride+3*(x+1);
picture -> data[0][x+y*w] = RGB_TO_Y(pixels, location);
picture -> data[0][x+1+y*w] = RGB_TO_Y(pixels, location2);
picture -> data[0][x+(y+1)*w] = 0;
picture -> data[0][x+1+(y+1)*w] = 0;
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = (RGB_TO_U(pixels, location)+RGB_TO_U(pixels, location2)+256)/4.0;
picture -> data[2][half_location] = (RGB_TO_V(pixels, location)+RGB_TO_V(pixels, location2)+256)/4.0;
}
if (x_odd)
{
location = y*row_stride+3*x;
picture -> data[0][x+y*w] = RGB_TO_Y(pixels, location);
picture -> data[0][x+1+y*w] = 0;
picture -> data[0][x+(y+1)*w] = 0;
picture -> data[0][x+1+(y+1)*w] = 0;
half_location = x/2 + y*w/4;
picture -> data[1][half_location] = (RGB_TO_U(pixels, location)+384)/4.0;
picture -> data[2][half_location] = (RGB_TO_V(pixels, location)+384)/4.0;
}
}
}
static void ffmpeg_encoder_set_frame_yuv_from_rgb (uint8_t * rgb, AVFrame * frame, int width, int height)
{
const int in_linesize = 4*width;
sws_context = sws_getCachedContext (sws_context,
width, height, AV_PIX_FMT_BGRA,
width, height, PIXEL_FORMAT,
0, NULL, NULL, NULL);
sws_scale (sws_context, (const uint8_t * const *)&rgb, & in_linesize, 0, height, frame -> data, frame -> linesize);
}
static GLubyte * read_opengl_image (gboolean movie, unsigned int width, unsigned int height)
{
size_t i, nvals;
nvals = width * height * 4;
GLubyte * pixels = malloc (nvals * sizeof(GLubyte));
GLubyte * rgb = malloc (nvals * sizeof(GLubyte));
glReadPixels (0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, pixels);
// Flip data veritcally
for (i = 0; i < height; i++)
{
memcpy (rgb + 4 * width * i, pixels + 4 * width * (height - i - 1), 4 * width);
}
free (pixels);
return rgb;
}
void fill_image (AVFrame * frame, int width, int height, gboolean movie, glwin * view)
{
// opengl call is here !!!
reshape (view, width, height);
draw (view);
GLubyte * image = read_opengl_image (movie, width, height);
if (movie)
{
//if (movie) convert_rgb_pixbuf_to_yuv (pixbuf, frame, width, height);
ffmpeg_encoder_set_frame_yuv_from_rgb (image, frame, width, height);
}
else
{
cairo_surface_t * surf = cairo_image_surface_create_for_data ((guchar *)image, CAIRO_FORMAT_ARGB32,
width, height, cairo_format_stride_for_width(CAIRO_FORMAT_ARGB32, width));
pixbuf = convert_to_pixbuf (surf);
cairo_surface_destroy (surf);
}
free (image);
}
static void write_video_frame (AVFormatContext * f_context, AVStream * stream,
AVFrame * frame, int frame_id, glwin * view)
{
int out_size;
int got_frame = 1;
AVCodecContext * c_context = stream -> codec;
fill_image (frame, c_context -> width, c_context -> height, TRUE, view);
AVPacket packet;
av_init_packet (& packet);
packet.data = NULL;
packet.size = 0;
frame -> pts = frame_id;
#if LIBAVCODEC_VERSION_MAJOR > 54
out_size = avcodec_encode_video2 (c_context, & packet, frame, & got_frame);
#else
out_size = avcodec_encode_video (c_context, video_outbuf, video_outbuf_size, frame);
#endif
if (out_size < 0)
{
// "Error while encoding video frame"
g_warning ("MOVIE_ENCODING:: VIDEO_FRAME:: error:: %s", av_err2str (out_size));
}
else
{
if (got_frame == 0)
{
// "Error while encoding video frame"
g_warning ("MOVIE_ENCODING:: VIDEO_FRAME:: warning:: packet empty, ignoring frame= %d", frame_id);
}
else
{
packet.stream_index = stream -> index;
packet.dts = av_rescale_q_rnd (packet.dts,
c_context -> time_base,
stream -> time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.pts = av_rescale_q_rnd (packet.pts,
c_context -> time_base,
stream -> time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.duration = av_rescale_q (packet.duration,
c_context -> time_base,
stream -> time_base);
#if LIBAVCODEC_VERSION_MAJOR > 55
packet.flags |= AV_PKT_FLAG_KEY;
#else
if(c_context -> coded_frame -> key_frame)
{
#if LIBAVCODEC_VERSION_MAJOR > 52
packet.flags |= AV_PKT_FLAG_KEY;
#else
packet.flags |= PKT_FLAG_KEY;
#endif
}
#endif
out_size = av_interleaved_write_frame (f_context, & packet);
if (out_size != 0)
{
// "Error while encoding video frame"
g_warning ("MOVIE_ENCODING:: VIDEO_FRAME:: error:: %s", av_err2str(out_size));
}
}
}
}
static AVFrame * alloc_video_frame (int width, int height)
{
AVFrame * frame;
frame = av_frame_alloc ();
if (! frame)
{
return NULL;
}
frame -> format = PIXEL_FORMAT;
frame -> width = width;
frame -> height = height;
if (av_image_alloc (frame -> data, frame -> linesize, frame -> width, frame -> height, frame -> format, 32) < 0)
{
g_warning ("MOVIE_ENCODING:: Could not allocate picture buffer");
return NULL;
}
return frame;
}
AVStream * add_video_stream (AVFormatContext * fc,
AVCodec * output_codec,
int framesec,
int extraframes,
int bitrate,
int video_res[2])
{
AVStream * st;
#if LIBAVCODEC_VERSION_MAJOR > 53
st = avformat_new_stream (fc, output_codec);
#else
st = av_new_stream (fc, output_codec);
#endif
if (! st)
{
g_warning ("MOVIE_ENCODING:: Could not create video stream");
return NULL;
}
/*#if LIBAVCODEC_VERSION_MAJOR > 52
st -> codec -> codec_type = AVMEDIA_TYPE_VIDEO;
#else
st -> codec -> codec_type = CODEC_TYPE_VIDEO;
#endif*/
/* put sample parameters */
st -> codec -> bit_rate_tolerance = bitrate*1000;
st -> codec -> bit_rate = bitrate*1000;
/* resolution must be a multiple of two */
st -> codec -> width = video_res[0];
st -> codec -> height = video_res[1];
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
st -> codec -> time_base.den = framesec; /* 25 images/s */
st -> codec -> time_base.num = 1;
st -> codec -> gop_size = extraframes; /* emit one intra frame every twelve frames at most */
st -> codec -> pix_fmt = PIXEL_FORMAT;
//st -> codec -> max_b_frames = 1;
return st;
}
gboolean create_movie (glwin * view,
GtkProgressBar * bar,
int framesec,
int extraframes,
int codec,
int oglquality,
int bitrate,
int video_res[2],
gchar * videofile)
{
int i, q;
AVCodec * av_codec = NULL;
AVFormatContext * format_context = NULL;
AVStream * av_stream = NULL;
int error;
#ifdef DEBUG
g_debug ("VIDEO ENCODING:: frames per seconds:: %d", framesec);
g_debug ("VIDEO ENCODING:: extra frames every:: %d frame(s)", extraframes);
g_debug ("VIDEO ENCODING:: birate:: %d", bitrate);
g_debug ("VIDEO ENCODING:: video_x = %d , video_y = %d", video_res[0], video_res[1]);
g_debug ("VIDEO ENCODING:: codec:: %d, name= %s, ext= %s", codec, codec_name[codec], codec_list[codec]);
#endif // DEBUG
num_frames = view -> anim -> frames;
av_register_all ();
avcodec_register_all ();
if (! (format_context = avformat_alloc_context()))
{
g_warning ("MOVIE_ENCODING:: Could not allocate AV format context");
return FALSE;
}
// Guess the desired container format based on file extension
if (! (format_context -> oformat = av_guess_format (NULL, videofile, NULL)))
{
g_warning ("MOVIE_ENCODING:: Could not deduce container format: please change file name");
return FALSE;
}
//av_strlcpy (format_context -> filename, videofile, sizeof(format_context)->filename);
if (! (av_codec = avcodec_find_encoder (codec_id[codec])))
{
// Codec not found
g_warning ("MOVIE_ENCODING:: Could not find codec:: %s", codec_name[codec]);
return FALSE;
}
av_stream = add_video_stream (format_context,
av_codec,
framesec,
extraframes,
bitrate,
video_res);
if (! av_stream)
{
// Error impossible to allocate stream
g_warning ("MOVIE_ENCODING:: could not create video stream");
return FALSE;
}
if (codec_id[codec] == AV_CODEC_ID_H264) av_opt_set (av_stream -> priv_data, "preset", "slow", 0);
/*
Some container formats (like MP4) require global headers to be present
Mark the encoder so that it behaves accordingly.
*/
if (format_context -> oformat -> flags & AVFMT_GLOBALHEADER)
{
av_stream -> codec -> flags |= CODEC_FLAG_GLOBAL_HEADER;
}
/* open the codec */
if ((error = avcodec_open2 (av_stream -> codec, av_codec, NULL)) < 0)
{
// Can not open codec
g_warning ("MOVIE_ENCODING:: could not open codec, error= %s", av_err2str(error));
return FALSE;
}
#if LIBAVCODEC_VERSION_MAJOR > 52
av_dump_format (format_context, 0, videofile, 1);
#else
dump_format (av_format_context, 0, videofile, 1);
#endif
#if LIBAVCODEC_VERSION_MAJOR > 52
if (avio_open (& format_context -> pb, videofile, AVIO_FLAG_WRITE) < 0)
#else
if (url_fopen (& av_format_context -> pb, videofile, URL_WRONLY) < 0)
#endif
{
// error impossible to open output file
g_warning ("MOVIE_ENCODING:: Impossible to open the video file '%s'", videofile);
return FALSE;
}
#if LIBAVCODEC_VERSION_MAJOR > 52
if (avformat_write_header (format_context, NULL) < 0)
#else
if (av_set_parameters (av_format_context, NULL) < 0)
#endif
{
g_warning ("MOVIE_ENCODING:: Impossible to write the AV format header");
return FALSE;
}
AVFrame * picture = alloc_video_frame (av_stream -> codec -> width,
av_stream -> codec -> height);
// View is the name of a structure that contains the data I want to render
view -> anim -> last = view -> anim -> first;
int frame_id;
for (frame_id = 0; frame_id < num_frames; frame_id ++)
{
write_video_frame (format_context, av_stream, picture, frame_id, view);
if (frame_id < num_frames-1) view -> anim -> last = view -> anim -> last -> next;
}
av_write_trailer (format_context);
avcodec_close (av_stream -> codec);
/* free the streams */
for (i=0; i<format_context -> nb_streams; i++)
{
av_freep (& format_context -> streams[i] -> codec);
}
av_freep (& picture -> data[0]);
av_frame_free (& picture);
if (!(format_context -> flags & AVFMT_NOFILE))
{
/* close the output file */
#if LIBAVCODEC_VERSION_MAJOR > 52
avio_close (format_context -> pb);
#else
url_fclose (av_format_context -> pb);
#endif
}
/* free the stream */
av_free (format_context);
return TRUE;
}
More information about the Libav-user
mailing list