[Libav-user] Grabbing desktop at a framerate of 25 fps
Mathieu Thoman
thoman.mathieu at gmail.com
Wed Sep 17 09:13:46 CEST 2014
Hello everybody,
First of all, sorry for my English.
I'm having some trouble with my code: I'm trying to record a screencast of the desktop at a rate
of 25 fps.
The first problem is that I can't reach the frame rate I'd like to get; I only capture 8 or 9
images per second.
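For this first problem, I am wondering whether I also need to ask gdigrab itself for 25 fps when I open the input, the same way I already pass "video_size". Here is a minimal sketch of what I mean (I am assuming the gdigrab demuxer accepts a "framerate" option, and I have not verified that this alone is enough, since the PNG encoding inside my loop may also be too slow to keep up):

// Sketch only: open the gdigrab "desktop" input while asking for 25 fps.
// Uses the same headers as my code below.
extern "C" {
#include "avdevice.h"
#include "avformat.h"
}

static AVFormatContext *open_desktop_input(void)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionary *options = NULL;
    AVInputFormat *gdigrab = NULL;

    av_register_all();
    avdevice_register_all();

    gdigrab = av_find_input_format("gdigrab");
    if (gdigrab == NULL)
        return NULL;

    // Ask the grabber for the capture size and the capture rate.
    av_dict_set(&options, "video_size", "1280x970", 0);
    av_dict_set(&options, "framerate", "25", 0);

    if (avformat_open_input(&fmt_ctx, "desktop", gdigrab, &options) != 0)
        fmt_ctx = NULL;

    av_dict_free(&options); // options the demuxer did not consume stay in here
    return fmt_ctx;
}

In this sketch I also pass 0 as the flags argument of av_dict_set instead of CODEC_FLAG_QSCALE, because I am not sure that flag means anything for a dictionary.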
The second problem is that my program crashes with an exception inside sws_scale.
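I suspect (but I am not sure) that this crash is connected to the way I call sws_scale in my loop below: I create a new SwsContext and a new RGB buffer for every frame, I never free them inside the loop, and I never check whether sws_getContext() returned NULL. Here is a sketch of how I think the conversion could be done with one reused context and a NULL check (frame_to_rgb is just a name I made up for the sketch):

extern "C" {
#include "avcodec.h"
#include "swscale.h"
}
#include <stdint.h>

// Sketch: convert one decoded frame to RGB24, reusing a single cached
// SwsContext across calls instead of allocating a new one per frame.
// Returns 0 on success, -1 on failure. *sws_ctx starts out as NULL and is
// owned by the caller (freed once with sws_freeContext after the loop).
static int frame_to_rgb(struct SwsContext **sws_ctx,
                        AVCodecContext *dec_ctx,
                        AVFrame *src, AVFrame *dst_rgb, uint8_t **rgb_buffer)
{
    int w = dec_ctx->width;
    int h = dec_ctx->height;

    // Allocate the RGB buffer only once and bind it to dst_rgb.
    if (*rgb_buffer == NULL) {
        int num_bytes = avpicture_get_size(AV_PIX_FMT_RGB24, w, h);
        *rgb_buffer = static_cast<uint8_t *>(av_malloc(num_bytes));
        if (*rgb_buffer == NULL)
            return -1;
        avpicture_fill(reinterpret_cast<AVPicture *>(dst_rgb), *rgb_buffer,
                       AV_PIX_FMT_RGB24, w, h);
    }

    // sws_getCachedContext() reuses the previous context when nothing changed.
    *sws_ctx = sws_getCachedContext(*sws_ctx, w, h, dec_ctx->pix_fmt,
                                    w, h, AV_PIX_FMT_RGB24, SWS_BILINEAR,
                                    NULL, NULL, NULL);
    if (*sws_ctx == NULL) // e.g. unsupported source pixel format
        return -1;

    sws_scale(*sws_ctx, src->data, src->linesize, 0, h,
              dst_rgb->data, dst_rgb->linesize);
    return 0;
}

In my real code I would then call sws_freeContext() on the context and av_free() on the buffer once, after the read loop has finished.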
Do you have any ideas about how I can improve my code?
Here is what I have managed to do so far.
#define __STDC_CONSTANT_MACROS 1

extern "C" {
#include "avcodec.h"
#include "avdevice.h"
#include "avformat.h"
#include "swscale.h"
}

static int jpeg_frame_number = 0;

void SaveFrame(AVFrame* pFrame, int dst_width, int dst_height, int output_max_size)
{
    AVCodecContext *png_encoder_codec_ctx = NULL;
    AVCodec *png_encoder_codec = NULL;
    uint8_t *png_buffer = NULL;
    uint8_t *png_encoder_output_buffer = NULL;
    size_t png_encoder_output_buffer_size = 0;
    int err = 0;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    int gotPacket;

    png_encoder_codec = avcodec_find_encoder_by_name("png");
    if (png_encoder_codec == NULL)
        return;

    png_encoder_output_buffer = static_cast<uint8_t *>(av_malloc(output_max_size));
    if (png_encoder_output_buffer == NULL)
    {
        av_free(png_buffer);
        return;
    }

    png_encoder_codec_ctx = avcodec_alloc_context3(png_encoder_codec);
    if (png_encoder_codec_ctx == NULL)
    {
        av_free(png_encoder_output_buffer);
        av_free(png_buffer);
        return;
    }

    png_encoder_codec_ctx->width = dst_width;
    png_encoder_codec_ctx->height = dst_height;
    png_encoder_codec_ctx->pix_fmt = PIX_FMT_RGB24;

    if (avcodec_open2(png_encoder_codec_ctx, png_encoder_codec, NULL) < 0)
    {
        av_free(png_encoder_codec_ctx);
        av_free(png_encoder_output_buffer);
        av_free(png_buffer);
        return;
    }

    err = avcodec_encode_video2(png_encoder_codec_ctx, &pkt, pFrame, &gotPacket);
    if (err < 0)
    {
        avcodec_close(png_encoder_codec_ctx);
        av_free(png_encoder_codec_ctx);
        av_free(png_buffer);
    }

    FILE *pFile;
    char szFilename[32];

    // Open file
    jpeg_frame_number++;
    sprintf(szFilename, "D:/Temp/frame%06d.png", jpeg_frame_number);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    // Write pixel data
    fwrite(pkt.data, 1, pkt.size, pFile);

    // Close file
    fclose(pFile);

    av_free_packet(&pkt);
}
int main(int argc, char **argv)
{
    AVInputFormat* pInputFormat = NULL;
    AVFormatContext* pFomartCtx = NULL;
    AVDictionary* pOptions = NULL;
    AVCodecContext* pCodecCtx = NULL;
    AVCodec* pCodec = NULL;
    AVFrame* pFrame = NULL;
    AVFrame* pFrameRGB = NULL;
    struct SwsContext *img_convert_ctx = NULL;
    int i = 0;
    int videoStream = -1;
    int frameFinished = 0;
    int numBytes = 0;
    uint8_t *buffer = NULL;

    AVPacket packet;
    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;

    // Register all formats and codecs
    av_register_all();
    avdevice_register_all();
    // av_log_set_level(-1);

    if (!(pInputFormat = av_find_input_format("gdigrab")))
    {
        fprintf(stderr, "Couldn't get input format.\n");
        return -1;
    }

    // Set up options
    av_dict_set(&pOptions, "video_size", "1280x970", CODEC_FLAG_QSCALE);

    // Open desktop
    if (avformat_open_input(&pFomartCtx, "desktop", pInputFormat, &pOptions) != 0)
    {
        fprintf(stderr, "Couldn't open desktop input.\n");
        return -1;
    }

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(AV_CODEC_ID_BMP);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Codec not found!\n");
        return -1;
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (pCodecCtx == NULL)
    {
        fprintf(stderr, "Codec context not allocated!\n");
        return -1;
    }

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        fprintf(stderr, "Could not open codec\n");
        return -1;
    }

    // Allocate video frames
    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL || pFrame == NULL)
        return -1;

    // Read frames and save frames to disk
    // avio_read();
    // avcodec_flush_buffers();
    while (av_read_frame(pFomartCtx, &packet) >= 0)
    {
        // Decode video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

        // Did we get a video frame?
        if (frameFinished)
        {
            // Convert the image from its native format to RGB
            int width = pCodecCtx->width;
            int height = pCodecCtx->height;

            // Determine required buffer size and allocate buffer
            numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, width, height);
            buffer = static_cast<uint8_t *>(av_malloc(numBytes * sizeof(uint8_t)));

            // Assign appropriate parts of buffer to image planes in pFrameRGB
            // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
            // of AVPicture
            avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
                           width, height);

            img_convert_ctx = sws_getContext(width, height, pCodecCtx->pix_fmt,
                                             width, height, AV_PIX_FMT_RGB24,
                                             SWS_BILINEAR, NULL, NULL, NULL);
            sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
                      height, pFrameRGB->data, pFrameRGB->linesize);

            // Save the frame to disk
            SaveFrame(pFrameRGB, width, height, numBytes);
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    sws_freeContext(img_convert_ctx);
    av_free(buffer);

    // Free the RGB image
    av_free(pFrameRGB);
    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFomartCtx);
    av_dict_free(&pOptions);
    av_free_packet(&packet);
}
Thank you very much.
Math