[Libav-user] Windows Media Player fails to play MP4 with hardware encoded H264 data.
JJ Liu
jjliu.hk at gmail.com
Tue Oct 6 11:19:13 CEST 2015
Thanks for your response.
Following is the diff between my modified file (main.c) and the original one
(muxing.c).
--- main.c 2015-09-25 12:15:20.000000000 +0800
+++ muxing.c 2015-09-16 14:57:53.000000000 +0800
@@ -34,7 +34,6 @@
#include <string.h>
#include <math.h>
-#include <libavcodec/avcodec.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
@@ -50,62 +49,6 @@
#define SCALE_FLAGS SWS_BICUBIC
-// JJ Start
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#define CLEAR(x) memset(&(x), 0, sizeof(x))
-
-
-#include "ait_capture.h"
-
-
-int video_handle;
-StreamFormat gVFmt;
-
-int open_video_device(const char* dev_name)
-{
- struct stat st;
- int handle;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
-
- CLEAR(st);
-
- if (!dev_name)
- {
- printf("%s(%s) Missing Device Name\n",__FUNCTION__,__FILE__);
- }
- else
- {
- if (stat(dev_name,&st)==-1)
- {
- printf( "%s(%s) Can't Identify device %s\n",__FUNCTION__,__FILE__,
dev_name);
- }
- else
- {
- if (!S_ISCHR(st.st_mode))
- {
- printf("%s(%s) %s is not a device\n",__FUNCTION__,__FILE__, dev_name);
- }
- else
- {
- handle=open(dev_name, O_RDWR|O_NONBLOCK,0);
- if (handle<0)
- {
- printf( "%s(%s) Cannot open device
%s\n",__FUNCTION__,__FILE__,dev_name);
- }
- else
- return handle;
- }
- }
- }
- exit(0);
- return -1;
-}
-
-// JJ End
-
-
// a wrapper around a single output AVStream
typedef struct OutputStream {
AVStream *st;
@@ -126,7 +69,6 @@
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s
duration_time:%s stream_index:%d\n",
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
@@ -137,7 +79,6 @@
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, *time_base, st->time_base);
pkt->stream_index = st->index;
@@ -154,20 +95,13 @@
{
AVCodecContext *c;
int i;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
/* find the encoder */
- printf("Try to find encoder for '%s'\n",
- avcodec_get_name(codec_id));
-
*codec = avcodec_find_encoder(codec_id);
- if (codec_id!=AV_CODEC_ID_H264)
- {
- if (!(*codec)) {
- fprintf(stderr, "Could not find encoder for '%s'\n",
- avcodec_get_name(codec_id));
- exit(1);
- }
+ if (!(*codec)) {
+ fprintf(stderr, "Could not find encoder for '%s'\n",
+ avcodec_get_name(codec_id));
+ exit(1);
}
ost->st = avformat_new_stream(oc, *codec);
@@ -178,16 +112,7 @@
ost->st->id = oc->nb_streams-1;
c = ost->st->codec;
- if (codec_id==AV_CODEC_ID_H264)
- {
- c->codec_type=AVMEDIA_TYPE_VIDEO;
- c->bit_rate=gVFmt.bitrate;
- c->extradata_size=0;
- c->extradata = NULL;
- }
-
-// switch ((*codec)->type) {
- switch (c->codec_type) {
+ switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
c->sample_fmt = (*codec)->sample_fmts ?
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
@@ -218,16 +143,8 @@
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
-// c->width = 352;
-// c->height = 288;
- c->width = gVFmt.width;
- c->height = gVFmt.height;
- c->me_range = 16;
- c->max_qdiff = 4;
- c->qmin = 10;
- c->qmax = 36;
- c->qcompress = 0.6f;
-
+ c->width = 352;
+ c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
@@ -235,8 +152,7 @@
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
c->time_base = ost->st->time_base;
-// c->gop_size = 12; /* emit one intra frame every twelve frames at most */
- c->gop_size = gVFmt.gop; /* emit one intra frame every twelve frames at most */
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
@@ -268,7 +184,6 @@
{
AVFrame *frame = av_frame_alloc();
int ret;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
if (!frame) {
fprintf(stderr, "Error allocating an audio frame\n");
@@ -283,10 +198,6 @@
if (nb_samples) {
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
- fprintf(stderr, "frame->format=%d\n",frame->format);
- fprintf(stderr, "frame->nb_sample=%d\n",frame->nb_samples);
- fprintf(stderr, "frame->channel_layout=%llu\n",frame->channel_layout);
- fprintf(stderr, "ret=%d\n",ret);
fprintf(stderr, "Error allocating an audio buffer\n");
exit(1);
}
@@ -301,7 +212,6 @@
int nb_samples;
int ret;
AVDictionary *opt = NULL;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
c = ost->st->codec;
@@ -359,7 +269,6 @@
AVFrame *frame = ost->tmp_frame;
int j, i, v;
int16_t *q = (int16_t*)frame->data[0];
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
@@ -367,8 +276,7 @@
return NULL;
for (j = 0; j <frame->nb_samples; j++) {
- //v = (int)(sin(ost->t) * 10000);
- v=j<frame->nb_samples/2?10000:0;
+ v = (int)(sin(ost->t) * 10000);
for (i = 0; i < ost->st->codec->channels; i++)
*q++ = v;
ost->t += ost->tincr;
@@ -393,7 +301,6 @@
int ret;
int got_packet;
int dst_nb_samples;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
av_init_packet(&pkt);
c = ost->st->codec;
@@ -454,7 +361,6 @@
{
AVFrame *picture;
int ret;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
picture = av_frame_alloc();
if (!picture)
@@ -479,7 +385,6 @@
int ret;
AVCodecContext *c = ost->st->codec;
AVDictionary *opt = NULL;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
av_dict_copy(&opt, opt_arg, 0);
@@ -488,13 +393,14 @@
av_dict_free(&opt);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n",
av_err2str(ret));
- //exit(1);
+ exit(1);
}
+ /* allocate and init a re-usable frame */
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
if (!ost->frame) {
fprintf(stderr, "Could not allocate video frame\n");
- //exit(1);
+ exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
@@ -504,31 +410,10 @@
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
if (!ost->tmp_frame) {
- printf("Could not allocate temporary picture\n");
+ fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
-
-// video_handle=open_video_device("/dev/video0");
-// printf("Video Device Handle %d\n",video_handle);
- if (ConfigureDev(&gVFmt)<0)
- {
- printf("%s(%s) - Fail to open device\n",__FUNCTION__,__FILE__);
- exit(0);
- }
- if (gVFmt.bitrate)
- SetBitrate(gVFmt.stream_id, gVFmt.bitrate/1000);
- if(gVFmt.gop)
- SetGOP(gVFmt.stream_id, gVFmt.gop);
- /* Mirror flip */
- if (gVFmt.ortn)
- SetMirrFlip(gVFmt.stream_id, gVFmt.ortn);
-
- if (StartCapturing(gVFmt.stream_id)<0)
- {
- printf("%s(%s) - Fail to start capturing\n",__FUNCTION__,__FILE__);
- exit(0);
- }
}
/* Prepare a dummy image. */
@@ -536,7 +421,6 @@
int width, int height)
{
int x, y, i, ret;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
/* when we pass a frame to the encoder, it may keep a reference to it
* internally;
@@ -565,7 +449,6 @@
static AVFrame *get_video_frame(OutputStream *ost)
{
AVCodecContext *c = ost->st->codec;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
@@ -575,7 +458,6 @@
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
-#if 0
if (!ost->sws_ctx) {
ost->sws_ctx = sws_getContext(c->width, c->height,
AV_PIX_FMT_YUV420P,
@@ -592,7 +474,6 @@
sws_scale(ost->sws_ctx,
(const uint8_t * const *)ost->tmp_frame->data,
ost->tmp_frame->linesize,
0, c->height, ost->frame->data, ost->frame->linesize);
-#endif
} else {
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
}
@@ -610,33 +491,12 @@
{
int ret;
AVCodecContext *c;
- AVFrame *frame=NULL;
+ AVFrame *frame;
int got_packet = 0;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
-
- FrameBuf *fra_buf = NULL;
c = ost->st->codec;
-
- if (c->codec_id==AV_CODEC_ID_H264)
- {
- fra_buf = AllocBuffer(0);
- if (!fra_buf)
- {
- printf( "%s(%s) Cannot allocate buffer at Line
%d\n",__FUNCTION__,__FILE__,__LINE__);
- return 1;
- }
- if (CaptureFrame(gVFmt.stream_id, fra_buf)==0)
- {
- }
- ost->frame->pts = ost->next_pts++;
- }
- else
- {
- printf( "%s(%s) Line: %d\n",__FUNCTION__,__FILE__,__LINE__);
- frame = get_video_frame(ost);
- }
+ frame = get_video_frame(ost);
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* a hack to avoid data copy with some raw video muxers */
@@ -648,17 +508,9 @@
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = ost->st->index;
+ pkt.data = (uint8_t *)frame;
+ pkt.size = sizeof(AVPicture);
- if (c->codec_id==AV_CODEC_ID_H264)
- {
- pkt.data = (uint8_t *)fra_buf->buf;
- pkt.size = fra_buf->buf_used;
- }
- else
- {
- pkt.data = (uint8_t *)frame;
- pkt.size = sizeof(AVPicture);
- }
pkt.pts = pkt.dts = frame->pts;
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
@@ -666,36 +518,19 @@
} else {
AVPacket pkt = { 0 };
av_init_packet(&pkt);
- if (c->codec_id==AV_CODEC_ID_H264)
- {
- if (IsKeyFrame(fra_buf->buf,fra_buf->buf_used)==1)
- pkt.flags |= AV_PKT_FLAG_KEY;
- pkt.stream_index = ost->st->index;
- pkt.data = (uint8_t *)fra_buf->buf;
- pkt.size = fra_buf->buf_used;
- //pkt.pts = pkt.dts = ost->frame->pts;
- pkt.pts = pkt.dts = AV_NOPTS_VALUE;
- av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
- ret = av_interleaved_write_frame(oc, &pkt);
- //ret = write_frame(oc, &c->time_base, ost->st, &pkt);
- FreeBuffer(fra_buf);
- got_packet=1;
- }
- else
- {
- printf( "%s(%s) Line: %d\n",__FUNCTION__,__FILE__,__LINE__);
- /* encode the image */
- ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
- if (ret < 0) {
- fprintf(stderr, "Error encoding video frame: %s\n",
av_err2str(ret));
- exit(1);
- }
- if (got_packet) {
- ret = write_frame(oc, &c->time_base, ost->st, &pkt);
- } else {
- ret = 0;
- }
- }
+
+ /* encode the image */
+ ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
+ if (ret < 0) {
+ fprintf(stderr, "Error encoding video frame: %s\n",
av_err2str(ret));
+ exit(1);
+ }
+
+ if (got_packet) {
+ ret = write_frame(oc, &c->time_base, ost->st, &pkt);
+ } else {
+ ret = 0;
+ }
}
if (ret < 0) {
@@ -708,11 +543,10 @@
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
avcodec_close(ost->st->codec);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
-// sws_freeContext(ost->sws_ctx);
+ sws_freeContext(ost->sws_ctx);
swr_free(&ost->swr_ctx);
}
@@ -731,9 +565,6 @@
int encode_video = 0, encode_audio = 0;
AVDictionary *opt = NULL;
- printf("%s(%s) Enter\n",__FUNCTION__,__FILE__);
-
-
int i=0;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
@@ -750,10 +581,9 @@
}
filename = argv[1];
-// JJ 20150922 No need to do it
-// if (argc > 3 && !strcmp(argv[2], "-flags")) {
-// av_dict_set(&opt, argv[2]+1, argv[3], 0);
-// }
+ if (argc > 3 && !strcmp(argv[2], "-flags")) {
+ av_dict_set(&opt, argv[2]+1, argv[3], 0);
+ }
/* allocate the output media context */
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
@@ -766,18 +596,6 @@
fmt = oc->oformat;
fmt->audio_codec=AV_CODEC_ID_MP2;
- fmt->video_codec=AV_CODEC_ID_H264;
-
- strcpy(gVFmt.dev_name,"/dev/video0");
- gVFmt.stream_id=SID_RECORD;
- gVFmt.pixFmt=V4L2_PIX_FMT_H264;
- gVFmt.width=1920;
- gVFmt.height=1080;
- gVFmt.framerate=30;
- gVFmt.gop=30;
- gVFmt.bitrate=7168*1000;
- gVFmt.ortn=3;
-
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
if (fmt->video_codec != AV_CODEC_ID_NONE) {
@@ -829,9 +647,8 @@
} else {
encode_audio = !write_audio_frame(oc, &audio_st);
}
- if (!encode_audio)
+ if (i>30*20)
break;
-
}
/* Write the trailer, if any. The trailer must be written before you
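
For what it is worth, my current suspicion is that the resulting MP4 is missing
H.264 extradata (the diff above sets c->extradata = NULL and c->extradata_size = 0)
and that the packets carry no usable timestamps (pkt.pts = pkt.dts = AV_NOPTS_VALUE),
both of which Windows Media Player appears to be strict about. Below is an untested
sketch of what I plan to try next: copy the SPS/PPS NAL units that lead the first
captured Annex-B frame into the codec context before avformat_write_header(), so the
mov muxer can build the avcC box. The helpers annexb_param_set_length() and
set_h264_extradata() are hypothetical names of my own, not FFmpeg or ait_capture.h APIs.

/* Untested sketch: derive stream extradata from the SPS/PPS at the start of
 * the first hardware-encoded Annex-B frame. The helpers below are my own;
 * only av_mallocz() and FF_INPUT_BUFFER_PADDING_SIZE come from FFmpeg. */
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Return the number of bytes covering the SPS/PPS NAL units that lead an
 * Annex-B buffer, or 0 if no parameter sets are found at the start. */
static int annexb_param_set_length(const uint8_t *buf, int size)
{
    int i, nal_type, have_ps = 0;
    for (i = 0; i + 4 <= size; i++) {
        if (buf[i] != 0 || buf[i + 1] != 0 || buf[i + 2] != 1)
            continue;                           /* not a start code */
        nal_type = buf[i + 3] & 0x1f;
        if (nal_type == 7 || nal_type == 8) {   /* SPS or PPS */
            have_ps = 1;
        } else if (have_ps) {                   /* first NAL after the parameter sets */
            return (i > 0 && buf[i - 1] == 0) ? i - 1 : i;
        }
    }
    return 0;
}

/* Attach the parameter sets to the codec context so the muxer can write
 * them into the file header. */
static int set_h264_extradata(AVCodecContext *c, const uint8_t *buf, int size)
{
    int len = annexb_param_set_length(buf, size);
    if (len <= 0)
        return -1;
    c->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!c->extradata)
        return AVERROR(ENOMEM);
    memcpy(c->extradata, buf, len);
    c->extradata_size = len;
    return 0;
}

In write_video_frame() I would also replace pkt.pts = pkt.dts = AV_NOPTS_VALUE with
pkt.pts = pkt.dts = ost->next_pts++ (expressed in c->time_base) before
av_packet_rescale_ts(), so every sample written to the MP4 has a valid timestamp.
Does that look like the right direction?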
On Tue, Oct 6, 2015 at 4:08 PM, Carl Eugen Hoyos <cehoyos at ag.or.at> wrote:
> JJ Liu <jjliu.hk at ...> writes:
>
> > Following is my modified muxing.c
>
> If you send a unified diff instead, there is
> at least a chance that somebody can help...
>
> Carl Eugen
>