[FFmpeg-cvslog] avcodec/mpegvideoenc: Move ratecontrol to MPVMainEncContext
Andreas Rheinhardt
git at videolan.org
Wed Mar 26 06:08:38 EET 2025
ffmpeg | branch: master | Andreas Rheinhardt <andreas.rheinhardt at outlook.com> | Fri Jan 28 20:43:54 2022 +0100| [d8c7925e3291e930fcd83b57719acbc3a6ff3697] | committer: Andreas Rheinhardt
avcodec/mpegvideoenc: Move ratecontrol to MPVMainEncContext
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt at outlook.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=d8c7925e3291e930fcd83b57719acbc3a6ff3697
---
libavcodec/mpegvideo.h | 8 -----
libavcodec/mpegvideo_enc.c | 73 ++++++++++++++++++++++-------------------
libavcodec/mpegvideoenc.h | 10 +++++-
libavcodec/ratecontrol.c | 75 +++++++++++++++++++++++-------------------
libavcodec/ratecontrol.h | 12 +++----
libavcodec/snowenc.c | 82 ++++++++++++++++++++++++----------------------
6 files changed, 140 insertions(+), 120 deletions(-)
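
The gist of the change, as visible in the diff below, is that the rate-control fields and the RateControlContext move from the per-slice MpegEncContext into the per-encoder MPVMainEncContext (which embeds the main MpegEncContext as m->s), and the ratecontrol.c entry points now take the main context. A minimal compilable sketch of the resulting layout and calling pattern follows; the field lists are trimmed and the helper is hypothetical, the real definitions live in mpegvideoenc.h and ratecontrol.h:

    /* Simplified sketch, not the real headers: shows where the rate-control
     * state sits after this commit and how ratecontrol.c functions receive it. */
    #include <stdint.h>

    typedef struct RateControlContext {
        double buffer_index;        /* VBV buffer fullness (one real field; rest elided) */
    } RateControlContext;

    typedef struct MpegEncContext {
        int qscale;                 /* per-slice encoder state stays here */
        /* ... no rate-control fields any more ... */
    } MpegEncContext;

    typedef struct MPVMainEncContext {
        MpegEncContext s;           /* main slice context, embedded as before */

        /* bit rate control, moved here from MpegEncContext */
        int64_t total_bits;
        int     frame_bits;         /* bits used for the current frame */
        int     stuffing_bits;      /* bits used for stuffing */
        int     next_lambda;        /* next lambda used for retrying to encode a frame */
        RateControlContext rc_context; /* only accessed in ratecontrol.c */
    } MPVMainEncContext;

    /* Pattern now used by the ratecontrol.c entry points (e.g. ff_vbv_update()):
     * take the main context and derive the slice context locally.
     * vbv_update_sketch() is only an illustration, not an FFmpeg function. */
    static int vbv_update_sketch(MPVMainEncContext *const m, int frame_size)
    {
        MpegEncContext     *const s   = &m->s;
        RateControlContext *const rcc = &m->rc_context;
        (void)s; (void)rcc; (void)frame_size;
        return 0;                   /* the real arithmetic lives in ff_vbv_update() */
    }
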
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 1091c93d95..75b1fff245 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -42,7 +42,6 @@
#include "mpegvideoencdsp.h"
#include "pixblockdsp.h"
#include "put_bits.h"
-#include "ratecontrol.h"
#include "qpeldsp.h"
#include "videodsp.h"
@@ -327,13 +326,6 @@ typedef struct MpegEncContext {
int dct_count[2];
uint16_t (*dct_offset)[64];
- /* bit rate control */
- int64_t total_bits;
- int frame_bits; ///< bits used for the current frame
- int stuffing_bits; ///< bits used for stuffing
- int next_lambda; ///< next lambda used for retrying to encode a frame
- RateControlContext rc_context; ///< contains stuff only accessed in ratecontrol.c
-
/* statistics, used for 2-pass encoding */
int mv_bits;
int header_bits;
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index ffa9484669..7f24c1baa6 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -84,7 +84,7 @@
#define QMAT_SHIFT_MMX 16
#define QMAT_SHIFT 21
-static int encode_picture(MpegEncContext *s, const AVPacket *pkt);
+static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
static int sse_mb(MpegEncContext *s);
static void denoise_dct_c(MpegEncContext *s, int16_t *block);
@@ -1008,7 +1008,8 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
#endif
}
- if ((ret = ff_rate_control_init(s)) < 0)
+ ret = ff_rate_control_init(m);
+ if (ret < 0)
return ret;
if (m->b_frame_strategy == 2) {
@@ -1043,7 +1044,7 @@ av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
MPVMainEncContext *const m = avctx->priv_data;
MpegEncContext *const s = &m->s;
- ff_rate_control_uninit(&s->rc_context);
+ ff_rate_control_uninit(&m->rc_context);
ff_mpv_common_end(s);
av_refstruct_pool_uninit(&s->picture_pool);
@@ -1622,7 +1623,7 @@ static int set_bframe_chain_length(MPVMainEncContext *const m)
// FIXME check that the gop check above is +-1 correct
av_refstruct_unref(&s->input_picture[0]);
- ff_vbv_update(s, 0);
+ ff_vbv_update(m, 0);
return 0;
}
@@ -1642,15 +1643,15 @@ static int set_bframe_chain_length(MPVMainEncContext *const m)
for (int i = 0; i < s->max_b_frames + 1; i++) {
int pict_num = s->input_picture[0]->display_picture_number + i;
- if (pict_num >= s->rc_context.num_entries)
+ if (pict_num >= m->rc_context.num_entries)
break;
if (!s->input_picture[i]) {
- s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
+ m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
break;
}
s->input_picture[i]->f->pict_type =
- s->rc_context.entry[pict_num].new_pict_type;
+ m->rc_context.entry[pict_num].new_pict_type;
}
}
@@ -1918,7 +1919,7 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
//emms_c();
frame_start(s);
vbv_retry:
- ret = encode_picture(s, pkt);
+ ret = encode_picture(m, pkt);
if (growing_buffer) {
av_assert0(s->pb.buf == avctx->internal->byte_buffer);
pkt->data = s->pb.buf;
@@ -1933,14 +1934,14 @@ vbv_retry:
ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
if (avctx->rc_buffer_size) {
- RateControlContext *rcc = &s->rc_context;
+ RateControlContext *rcc = &m->rc_context;
int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
if (put_bits_count(&s->pb) > max_size &&
s->lambda < s->lmax) {
- s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
+ m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
(s->qscale + 1) / s->qscale);
if (s->adaptive_quant) {
int i;
@@ -1968,7 +1969,7 @@ vbv_retry:
}
if (avctx->flags & AV_CODEC_FLAG_PASS1)
- ff_write_pass1_stats(s);
+ ff_write_pass1_stats(m);
for (int i = 0; i < MPV_MAX_PLANES; i++)
avctx->error[i] += s->encoding_error[i];
@@ -1982,10 +1983,10 @@ vbv_retry:
s->misc_bits + s->i_tex_bits +
s->p_tex_bits);
flush_put_bits(&s->pb);
- s->frame_bits = put_bits_count(&s->pb);
+ m->frame_bits = put_bits_count(&s->pb);
- stuffing_count = ff_vbv_update(s, s->frame_bits);
- s->stuffing_bits = 8*stuffing_count;
+ stuffing_count = ff_vbv_update(m, m->frame_bits);
+ m->stuffing_bits = 8*stuffing_count;
if (stuffing_count) {
if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
@@ -2009,10 +2010,10 @@ vbv_retry:
break;
default:
av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
- s->stuffing_bits = 0;
+ m->stuffing_bits = 0;
}
flush_put_bits(&s->pb);
- s->frame_bits = put_bits_count(&s->pb);
+ m->frame_bits = put_bits_count(&s->pb);
}
/* update MPEG-1/2 vbv_delay for CBR */
@@ -2027,9 +2028,9 @@ vbv_retry:
int vbv_delay, min_delay;
double inbits = avctx->rc_max_rate *
av_q2d(avctx->time_base);
- int minbits = s->frame_bits - 8 *
+ int minbits = m->frame_bits - 8 *
(s->vbv_delay_pos - 1);
- double bits = s->rc_context.buffer_index + minbits - inbits;
+ double bits = m->rc_context.buffer_index + minbits - inbits;
uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;
if (bits < 0)
@@ -2064,7 +2065,7 @@ vbv_retry:
return ret;
}
}
- s->total_bits += s->frame_bits;
+ m->total_bits += m->frame_bits;
pkt->pts = s->cur_pic.ptr->f->pts;
pkt->duration = s->cur_pic.ptr->f->duration;
@@ -2089,14 +2090,14 @@ vbv_retry:
if (s->mb_info)
av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
} else {
- s->frame_bits = 0;
+ m->frame_bits = 0;
}
ff_mpv_unref_picture(&s->cur_pic);
- av_assert1((s->frame_bits & 7) == 0);
+ av_assert1((m->frame_bits & 7) == 0);
- pkt->size = s->frame_bits / 8;
+ pkt->size = m->frame_bits / 8;
*got_packet = !!pkt->size;
return 0;
}
@@ -3611,12 +3612,15 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
flush_put_bits(&dst->pb);
}
-static int estimate_qp(MpegEncContext *s, int dry_run){
- if (s->next_lambda){
- s->cur_pic.ptr->f->quality = s->next_lambda;
- if(!dry_run) s->next_lambda= 0;
+static int estimate_qp(MPVMainEncContext *const m, int dry_run)
+{
+ MpegEncContext *const s = &m->s;
+
+ if (m->next_lambda){
+ s->cur_pic.ptr->f->quality = m->next_lambda;
+ if(!dry_run) m->next_lambda= 0;
} else if (!s->fixed_qscale) {
- int quality = ff_rate_estimate_qscale(s, dry_run);
+ int quality = ff_rate_estimate_qscale(m, dry_run);
s->cur_pic.ptr->f->quality = quality;
if (s->cur_pic.ptr->f->quality < 0)
return -1;
@@ -3661,8 +3665,9 @@ static void set_frame_distances(MpegEncContext * s){
}
}
-static int encode_picture(MpegEncContext *s, const AVPacket *pkt)
+static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
{
+ MpegEncContext *const s = &m->s;
int i, ret;
int bits;
int context_count = s->slice_context_count;
@@ -3689,9 +3694,10 @@ static int encode_picture(MpegEncContext *s, const AVPacket *pkt)
}
if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
- if (estimate_qp(s,1) < 0)
- return -1;
- ff_get_2pass_fcode(s);
+ ret = estimate_qp(m, 1);
+ if (ret < 0)
+ return ret;
+ ff_get_2pass_fcode(m);
} else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
if(s->pict_type==AV_PICTURE_TYPE_B)
s->lambda= s->last_lambda_for[s->pict_type];
@@ -3814,8 +3820,9 @@ static int encode_picture(MpegEncContext *s, const AVPacket *pkt)
}
}
- if (estimate_qp(s, 0) < 0)
- return -1;
+ ret = estimate_qp(m, 0);
+ if (ret < 0)
+ return ret;
if (s->qscale < 3 && s->max_qcoeff <= 128 &&
s->pict_type == AV_PICTURE_TYPE_I &&
diff --git a/libavcodec/mpegvideoenc.h b/libavcodec/mpegvideoenc.h
index 7b5635e50e..c1d2821038 100644
--- a/libavcodec/mpegvideoenc.h
+++ b/libavcodec/mpegvideoenc.h
@@ -32,6 +32,7 @@
#include "libavutil/opt.h"
#include "mpegvideo.h"
+#include "ratecontrol.h"
#define MPVENC_MAX_B_FRAMES 16
@@ -43,6 +44,13 @@ typedef struct MPVMainEncContext {
int b_frame_strategy;
int b_sensitivity;
int brd_scale;
+
+ /* bit rate control */
+ int64_t total_bits;
+ int frame_bits; ///< bits used for the current frame
+ int stuffing_bits; ///< bits used for stuffing
+ int next_lambda; ///< next lambda used for retrying to encode a frame
+ RateControlContext rc_context; ///< contains stuff only accessed in ratecontrol.c
} MPVMainEncContext;
#define MAX_FCODE 7
@@ -94,7 +102,7 @@ typedef struct MPVMainEncContext {
#define FF_MPV_OFFSET(x) offsetof(MpegEncContext, x)
#define FF_MPV_MAIN_OFFSET(x) offsetof(MPVMainEncContext, x)
-#define FF_RC_OFFSET(x) offsetof(MpegEncContext, rc_context.x)
+#define FF_RC_OFFSET(x) offsetof(MPVMainEncContext, rc_context.x)
#define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
#define FF_MPV_COMMON_OPTS \
FF_MPV_OPT_CMP_FUNC, \
diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c
index f5e0b70a9a..ebc3a30fc1 100644
--- a/libavcodec/ratecontrol.c
+++ b/libavcodec/ratecontrol.c
@@ -35,8 +35,9 @@
#include "mpegvideoenc.h"
#include "libavutil/eval.h"
-void ff_write_pass1_stats(MpegEncContext *s)
+void ff_write_pass1_stats(MPVMainEncContext *const m)
{
+ const MpegEncContext *const s = &m->s;
snprintf(s->avctx->stats_out, 256,
"in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
"fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d hbits:%d;\n",
@@ -101,9 +102,10 @@ static double bits2qp_cb(void *rce, double qp)
return bits2qp(rce, qp);
}
-static double get_diff_limited_q(MpegEncContext *s, const RateControlEntry *rce, double q)
+static double get_diff_limited_q(MPVMainEncContext *m, const RateControlEntry *rce, double q)
{
- RateControlContext *rcc = &s->rc_context;
+ MpegEncContext *const s = &m->s;
+ RateControlContext *const rcc = &m->rc_context;
AVCodecContext *a = s->avctx;
const int pict_type = rce->new_pict_type;
const double last_p_q = rcc->last_qscale_for[AV_PICTURE_TYPE_P];
@@ -168,10 +170,11 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic
*qmax_ret = qmax;
}
-static double modify_qscale(MpegEncContext *s, const RateControlEntry *rce,
+static double modify_qscale(MPVMainEncContext *const m, const RateControlEntry *rce,
double q, int frame_num)
{
- RateControlContext *rcc = &s->rc_context;
+ MpegEncContext *const s = &m->s;
+ RateControlContext *const rcc = &m->rc_context;
const double buffer_size = s->avctx->rc_buffer_size;
const double fps = get_fps(s->avctx);
const double min_rate = s->avctx->rc_min_rate / fps;
@@ -259,10 +262,11 @@ static double modify_qscale(MpegEncContext *s, const RateControlEntry *rce,
/**
* Modify the bitrate curve from pass1 for one frame.
*/
-static double get_qscale(MpegEncContext *s, RateControlEntry *rce,
+static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce,
double rate_factor, int frame_num)
{
- RateControlContext *rcc = &s->rc_context;
+ MpegEncContext *const s = &m->s;
+ RateControlContext *rcc = &m->rc_context;
AVCodecContext *a = s->avctx;
const int pict_type = rce->new_pict_type;
const double mb_num = s->mb_num;
@@ -332,9 +336,10 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce,
return q;
}
-static int init_pass2(MpegEncContext *s)
+static int init_pass2(MPVMainEncContext *const m)
{
- RateControlContext *rcc = &s->rc_context;
+ RateControlContext *const rcc = &m->rc_context;
+ MpegEncContext *const s = &m->s;
AVCodecContext *a = s->avctx;
int i, toobig;
AVRational fps = get_fpsQ(s->avctx);
@@ -393,7 +398,7 @@ static int init_pass2(MpegEncContext *s)
for (i = 0; i < rcc->num_entries; i++) {
const RateControlEntry *rce = &rcc->entry[i];
- qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i);
+ qscale[i] = get_qscale(m, &rcc->entry[i], rate_factor, i);
rcc->last_qscale_for[rce->pict_type] = qscale[i];
}
av_assert0(filter_size % 2 == 1);
@@ -402,13 +407,13 @@ static int init_pass2(MpegEncContext *s)
for (i = FFMAX(0, rcc->num_entries - 300); i < rcc->num_entries; i++) {
const RateControlEntry *rce = &rcc->entry[i];
- qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
+ qscale[i] = get_diff_limited_q(m, rce, qscale[i]);
}
for (i = rcc->num_entries - 1; i >= 0; i--) {
const RateControlEntry *rce = &rcc->entry[i];
- qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
+ qscale[i] = get_diff_limited_q(m, rce, qscale[i]);
}
/* smooth curve */
@@ -438,10 +443,10 @@ static int init_pass2(MpegEncContext *s)
RateControlEntry *rce = &rcc->entry[i];
double bits;
- rce->new_qscale = modify_qscale(s, rce, blurred_qscale[i], i);
+ rce->new_qscale = modify_qscale(m, rce, blurred_qscale[i], i);
bits = qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits;
- bits += 8 * ff_vbv_update(s, bits);
+ bits += 8 * ff_vbv_update(m, bits);
rce->expected_bits = expected_bits;
expected_bits += bits;
@@ -494,9 +499,10 @@ static int init_pass2(MpegEncContext *s)
return 0;
}
-av_cold int ff_rate_control_init(MpegEncContext *s)
+av_cold int ff_rate_control_init(MPVMainEncContext *const m)
{
- RateControlContext *rcc = &s->rc_context;
+ MpegEncContext *const s = &m->s;
+ RateControlContext *rcc = &m->rc_context;
int i, res;
static const char * const const_names[] = {
"PI",
@@ -632,7 +638,7 @@ av_cold int ff_rate_control_init(MpegEncContext *s)
p = next;
}
- res = init_pass2(s);
+ res = init_pass2(m);
if (res < 0)
return res;
}
@@ -686,7 +692,7 @@ av_cold int ff_rate_control_init(MpegEncContext *s)
rcc->mv_bits_sum[rce.pict_type] += rce.mv_bits;
rcc->frame_count[rce.pict_type]++;
- get_qscale(s, &rce, rcc->pass1_wanted_bits / rcc->pass1_rc_eq_output_sum, i);
+ get_qscale(m, &rce, rcc->pass1_wanted_bits / rcc->pass1_rc_eq_output_sum, i);
// FIXME misbehaves a little for variable fps
rcc->pass1_wanted_bits += s->bit_rate / get_fps(s->avctx);
@@ -717,9 +723,10 @@ av_cold void ff_rate_control_uninit(RateControlContext *rcc)
av_freep(&rcc->cplx_tab);
}
-int ff_vbv_update(MpegEncContext *s, int frame_size)
+int ff_vbv_update(MPVMainEncContext *m, int frame_size)
{
- RateControlContext *rcc = &s->rc_context;
+ MpegEncContext *const s = &m->s;
+ RateControlContext *const rcc = &m->rc_context;
const double fps = get_fps(s->avctx);
const int buffer_size = s->avctx->rc_buffer_size;
const double min_rate = s->avctx->rc_min_rate / fps;
@@ -893,9 +900,10 @@ static void adaptive_quantization(RateControlContext *const rcc,
}
}
-void ff_get_2pass_fcode(MpegEncContext *s)
+void ff_get_2pass_fcode(MPVMainEncContext *const m)
{
- const RateControlContext *rcc = &s->rc_context;
+ MpegEncContext *const s = &m->s;
+ const RateControlContext *rcc = &m->rc_context;
const RateControlEntry *rce = &rcc->entry[s->picture_number];
s->f_code = rce->f_code;
@@ -904,8 +912,10 @@ void ff_get_2pass_fcode(MpegEncContext *s)
// FIXME rd or at least approx for dquant
-float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
+float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
{
+ MpegEncContext *const s = &m->s;
+ RateControlContext *rcc = &m->rc_context;
float q;
int qmin, qmax;
float br_compensation;
@@ -914,7 +924,6 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
double fps;
int picture_number = s->picture_number;
int64_t wanted_bits;
- RateControlContext *rcc = &s->rc_context;
AVCodecContext *a = s->avctx;
RateControlEntry local_rce, *rce;
double bits;
@@ -931,11 +940,11 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
const int64_t last_var =
s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
: rcc->last_mc_mb_var_sum;
- av_assert1(s->frame_bits >= s->stuffing_bits);
+ av_assert1(m->frame_bits >= m->stuffing_bits);
update_predictor(&rcc->pred[s->last_pict_type],
rcc->last_qscale,
sqrt(last_var),
- s->frame_bits - s->stuffing_bits);
+ m->frame_bits - m->stuffing_bits);
}
if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
@@ -970,7 +979,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
wanted_bits = (int64_t)wanted_bits_double;
}
- diff = s->total_bits - wanted_bits;
+ diff = m->total_bits - wanted_bits;
br_compensation = (a->bit_rate_tolerance - diff) / a->bit_rate_tolerance;
if (br_compensation <= 0.0)
br_compensation = 0.001;
@@ -984,7 +993,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
q = rce->new_qscale / br_compensation;
ff_dlog(s->avctx, "%f %f %f last:%d var:%"PRId64" type:%d//\n", q, rce->new_qscale,
- br_compensation, s->frame_bits, var, pict_type);
+ br_compensation, m->frame_bits, var, pict_type);
} else {
rce->pict_type =
rce->new_pict_type = pict_type;
@@ -1015,12 +1024,12 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
rate_factor = rcc->pass1_wanted_bits /
rcc->pass1_rc_eq_output_sum * br_compensation;
- q = get_qscale(s, rce, rate_factor, picture_number);
+ q = get_qscale(m, rce, rate_factor, picture_number);
if (q < 0)
return -1;
av_assert0(q > 0.0);
- q = get_diff_limited_q(s, rce, q);
+ q = get_diff_limited_q(m, rce, q);
av_assert0(q > 0.0);
// FIXME type dependent blur like in 2-pass
@@ -1034,7 +1043,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
}
av_assert0(q > 0.0);
- q = modify_qscale(s, rce, q, picture_number);
+ q = modify_qscale(m, rce, q, picture_number);
rcc->pass1_wanted_bits += s->bit_rate / fps;
@@ -1047,8 +1056,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
"size:%d var:%"PRId64"/%"PRId64" br:%"PRId64" fps:%d\n",
av_get_picture_type_char(pict_type),
qmin, q, qmax, picture_number,
- wanted_bits / 1000, s->total_bits / 1000,
- br_compensation, short_term_q, s->frame_bits,
+ wanted_bits / 1000, m->total_bits / 1000,
+ br_compensation, short_term_q, m->frame_bits,
s->mb_var_sum, s->mc_mb_var_sum,
s->bit_rate / 1000, (int)fps);
}
diff --git a/libavcodec/ratecontrol.h b/libavcodec/ratecontrol.h
index b889491335..3bcfa5658a 100644
--- a/libavcodec/ratecontrol.h
+++ b/libavcodec/ratecontrol.h
@@ -93,14 +93,14 @@ typedef struct RateControlContext{
float *cplx_tab, *bits_tab;
}RateControlContext;
-struct MpegEncContext;
+typedef struct MPVMainEncContext MPVMainEncContext;
/* rate control */
-int ff_rate_control_init(struct MpegEncContext *s);
-float ff_rate_estimate_qscale(struct MpegEncContext *s, int dry_run);
-void ff_write_pass1_stats(struct MpegEncContext *s);
-int ff_vbv_update(struct MpegEncContext *s, int frame_size);
-void ff_get_2pass_fcode(struct MpegEncContext *s);
+int ff_rate_control_init(MPVMainEncContext *m);
+float ff_rate_estimate_qscale(MPVMainEncContext *m, int dry_run);
+void ff_write_pass1_stats(MPVMainEncContext *m);
+int ff_vbv_update(MPVMainEncContext *m, int frame_size);
+void ff_get_2pass_fcode(MPVMainEncContext *m);
void ff_rate_control_uninit(RateControlContext *rcc);
#endif /* AVCODEC_RATECONTROL_H */
diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c
index 4d69c3053d..830f6f5096 100644
--- a/libavcodec/snowenc.c
+++ b/libavcodec/snowenc.c
@@ -61,7 +61,7 @@ typedef struct SnowEncContext {
int scenechange_threshold;
MECmpContext mecc;
- MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)
+ MPVMainEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)
MPVPicture cur_pic, last_pic;
#define ME_CACHE_SIZE 1024
unsigned me_cache[ME_CACHE_SIZE];
@@ -160,7 +160,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
{
SnowEncContext *const enc = avctx->priv_data;
SnowContext *const s = &enc->com;
- MpegEncContext *const mpv = &enc->m;
+ MpegEncContext *const mpv = &enc->m.s;
int plane_index, ret;
int i;
@@ -217,7 +217,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
mcf(12,12)
ff_me_cmp_init(&enc->mecc, avctx);
- ret = ff_me_init(&enc->m.me, avctx, &enc->mecc, 0);
+ ret = ff_me_init(&mpv->me, avctx, &enc->mecc, 0);
if (ret < 0)
return ret;
ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
@@ -252,7 +252,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
return AVERROR(ENOMEM);
}
if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&AV_CODEC_FLAG_QSCALE)){
- ret = ff_rate_control_init(mpv);
+ ret = ff_rate_control_init(&enc->m);
if(ret < 0)
return ret;
}
@@ -369,7 +369,7 @@ static inline int get_penalty_factor(int lambda, int lambda2, int type){
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
{
SnowContext *const s = &enc->com;
- MotionEstContext *const c = &enc->m.me;
+ MotionEstContext *const c = &enc->m.s.me;
uint8_t p_buffer[1024];
uint8_t i_buffer[1024];
uint8_t p_state[sizeof(s->block_state)];
@@ -435,9 +435,9 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
last_mv[2][0]= bottom->mx;
last_mv[2][1]= bottom->my;
- enc->m.mb_stride = 2;
- enc->m.mb_x =
- enc->m.mb_y = 0;
+ enc->m.s.mb_stride = 2;
+ enc->m.s.mb_x =
+ enc->m.s.mb_y = 0;
c->skip= 0;
av_assert1(c-> stride == stride);
@@ -446,7 +446,7 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
- c->current_mv_penalty = c->mv_penalty[enc->m.f_code=1] + MAX_DMV;
+ c->current_mv_penalty = c->mv_penalty[enc->m.s.f_code=1] + MAX_DMV;
c->xmin = - x*block_w - 16+3;
c->ymin = - y*block_w - 16+3;
@@ -482,7 +482,7 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
for(ref=0; ref<s->ref_frames; ref++){
init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
- ref_score= ff_epzs_motion_search(&enc->m, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
+ ref_score = ff_epzs_motion_search(&enc->m.s, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
(1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);
av_assert2(ref_mx >= c->xmin);
@@ -490,8 +490,10 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
av_assert2(ref_my >= c->ymin);
av_assert2(ref_my <= c->ymax);
- ref_score= c->sub_motion_search(&enc->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
- ref_score= ff_get_mb_score(&enc->m, ref_mx, ref_my, 0, 0, level-LOG2_MB_SIZE+4, block_w, 0);
+ ref_score = c->sub_motion_search(&enc->m.s, &ref_mx, &ref_my, ref_score,
+ 0, 0, level-LOG2_MB_SIZE+4, block_w);
+ ref_score = ff_get_mb_score(&enc->m.s, ref_mx, ref_my, 0, 0,
+ level-LOG2_MB_SIZE+4, block_w, 0);
ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
if(s->ref_mvs[ref]){
s->ref_mvs[ref][index][0]= ref_mx;
@@ -567,7 +569,7 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
if (vard <= 64 || vard < varc)
c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
else
- c->scene_change_score += enc->m.qscale;
+ c->scene_change_score += enc->m.s.qscale;
}
if(level!=s->block_max_depth){
@@ -670,7 +672,7 @@ static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
const int ref_stride= s->current_picture->linesize[plane_index];
const uint8_t *src = s->input_picture->data[plane_index];
- IDWTELEM *dst= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
+ IDWTELEM *dst= (IDWTELEM*)enc->m.s.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
const int b_stride = s->b_width << s->block_max_depth;
const int w= p->width;
const int h= p->height;
@@ -768,7 +770,7 @@ static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
const int ref_stride= s->current_picture->linesize[plane_index];
uint8_t *dst= s->current_picture->data[plane_index];
const uint8_t *src = s->input_picture->data[plane_index];
- IDWTELEM *pred= (IDWTELEM*)enc->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4;
+ IDWTELEM *pred= (IDWTELEM*)enc->m.s.sc.obmc_scratchpad + plane_index*block_size*block_size*4;
uint8_t *cur = s->scratchbuf;
uint8_t *tmp = s->emu_edge_buffer;
const int b_stride = s->b_width << s->block_max_depth;
@@ -831,19 +833,19 @@ static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
* to improve the score of the whole frame, thus iterative motion
* estimation does not always converge. */
if(s->avctx->me_cmp == FF_CMP_W97)
- distortion = ff_w97_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
+ distortion = ff_w97_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
else if(s->avctx->me_cmp == FF_CMP_W53)
- distortion = ff_w53_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
+ distortion = ff_w53_32_c(&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
else{
distortion = 0;
for(i=0; i<4; i++){
int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
- distortion += enc->m.me.me_cmp[0](&enc->m, src + off, dst + off, ref_stride, 16);
+ distortion += enc->m.s.me.me_cmp[0](&enc->m.s, src + off, dst + off, ref_stride, 16);
}
}
}else{
av_assert2(block_w==8);
- distortion = enc->m.me.me_cmp[0](&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
+ distortion = enc->m.s.me.me_cmp[0](&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
}
if(plane_index==0){
@@ -909,7 +911,7 @@ static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_inde
}
av_assert1(block_w== 8 || block_w==16);
- distortion += enc->m.me.me_cmp[block_w==8](&enc->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
+ distortion += enc->m.s.me.me_cmp[block_w==8](&enc->m.s, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
}
if(plane_index==0){
@@ -1706,11 +1708,11 @@ static int ratecontrol_1pass(SnowEncContext *enc, AVFrame *pict)
coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
if(pict->pict_type == AV_PICTURE_TYPE_I){
- enc->m.mb_var_sum = coef_sum;
- enc->m.mc_mb_var_sum = 0;
+ enc->m.s.mb_var_sum = coef_sum;
+ enc->m.s.mc_mb_var_sum = 0;
}else{
- enc->m.mc_mb_var_sum = coef_sum;
- enc->m.mb_var_sum = 0;
+ enc->m.s.mc_mb_var_sum = coef_sum;
+ enc->m.s.mb_var_sum = 0;
}
pict->quality= ff_rate_estimate_qscale(&enc->m, 1);
@@ -1757,7 +1759,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
{
SnowEncContext *const enc = avctx->priv_data;
SnowContext *const s = &enc->com;
- MpegEncContext *const mpv = &enc->m;
+ MpegEncContext *const mpv = &enc->m.s;
RangeCoder * const c= &s->c;
AVCodecInternal *avci = avctx->internal;
AVFrame *pic;
@@ -1793,10 +1795,10 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
mpv->picture_number = avctx->frame_num;
if(avctx->flags&AV_CODEC_FLAG_PASS2){
- mpv->pict_type = pic->pict_type = mpv->rc_context.entry[avctx->frame_num].new_pict_type;
+ mpv->pict_type = pic->pict_type = enc->m.rc_context.entry[avctx->frame_num].new_pict_type;
s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
- pic->quality = ff_rate_estimate_qscale(mpv, 0);
+ pic->quality = ff_rate_estimate_qscale(&enc->m, 0);
if (pic->quality < 0)
return -1;
}
@@ -1877,7 +1879,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
mpv->qdsp = enc->qdsp; //move
mpv->hdsp = s->hdsp;
- ff_me_init_pic(&enc->m);
+ ff_me_init_pic(mpv);
s->hdsp = mpv->hdsp;
}
@@ -2043,17 +2045,19 @@ redo_frame:
s->current_picture->pict_type = pic->pict_type;
s->current_picture->quality = pic->quality;
- mpv->frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
- mpv->p_tex_bits = mpv->frame_bits - mpv->misc_bits - mpv->mv_bits;
- mpv->total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
+ enc->m.frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
+ mpv->p_tex_bits = enc->m.frame_bits - mpv->misc_bits - mpv->mv_bits;
+ enc->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
enc->cur_pic.display_picture_number =
enc->cur_pic.coded_picture_number = avctx->frame_num;
enc->cur_pic.f->quality = pic->quality;
- if (enc->pass1_rc)
- if (ff_rate_estimate_qscale(mpv, 0) < 0)
- return -1;
+ if (enc->pass1_rc) {
+ ret = ff_rate_estimate_qscale(&enc->m, 0);
+ if (ret < 0)
+ return ret;
+ }
if(avctx->flags&AV_CODEC_FLAG_PASS1)
- ff_write_pass1_stats(mpv);
+ ff_write_pass1_stats(&enc->m);
mpv->last_pict_type = mpv->pict_type;
emms_c();
@@ -2088,10 +2092,10 @@ static av_cold int encode_end(AVCodecContext *avctx)
av_freep(&s->ref_scores[i]);
}
- enc->m.me.temp = NULL;
- av_freep(&enc->m.me.scratchpad);
- av_freep(&enc->m.me.map);
- av_freep(&enc->m.sc.obmc_scratchpad);
+ enc->m.s.me.temp = NULL;
+ av_freep(&enc->m.s.me.scratchpad);
+ av_freep(&enc->m.s.me.map);
+ av_freep(&enc->m.s.sc.obmc_scratchpad);
av_freep(&avctx->stats_out);