[FFmpeg-devel] [PATCH] avcodec/asv: Split ASV1Context into decoder and encoder contexts
Andreas Rheinhardt
andreas.rheinhardt at outlook.com
Tue Oct 4 18:43:35 EEST 2022
Andreas Rheinhardt:
> Most of the state in ASV1Context is actually only used by
> the decoders or the encoders, but not by both: of the seven
> contexts embedded in ASV1Context, only the BswapDSPContext is
> used by both. So splitting it makes sense.
>
> Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt at outlook.com>
> ---
> libavcodec/asv.c    |  4 ++-
> libavcodec/asv.h    | 25 ++--------------
> libavcodec/asvdec.c | 72 ++++++++++++++++++++++++++++-----------------
> libavcodec/asvenc.c | 69 ++++++++++++++++++++++++++-----------------
> 4 files changed, 92 insertions(+), 78 deletions(-)
>
> diff --git a/libavcodec/asv.c b/libavcodec/asv.c
> index dcae90982a..3aa08c30c0 100644
> --- a/libavcodec/asv.c
> +++ b/libavcodec/asv.c
> @@ -25,6 +25,8 @@
>
> #include <stdint.h>
>
> +#include "libavutil/attributes.h"
> +
> #include "asv.h"
> #include "avcodec.h"
> #include "bswapdsp.h"
> @@ -88,7 +90,7 @@ const uint16_t ff_asv2_level_tab[63][2] = {
>
> av_cold void ff_asv_common_init(AVCodecContext *avctx)
> {
> - ASV1Context *const a = avctx->priv_data;
> + ASVCommonContext *const a = avctx->priv_data;
>
> ff_bswapdsp_init(&a->bbdsp);
>
> diff --git a/libavcodec/asv.h b/libavcodec/asv.h
> index 269bbe7c18..7c0983a497 100644
> --- a/libavcodec/asv.h
> +++ b/libavcodec/asv.h
> @@ -28,38 +28,17 @@
>
> #include <stdint.h>
>
> -#include "libavutil/mem_internal.h"
> -
> #include "avcodec.h"
> -#include "blockdsp.h"
> #include "bswapdsp.h"
> -#include "fdctdsp.h"
> -#include "idctdsp.h"
> -#include "get_bits.h"
> -#include "pixblockdsp.h"
> -#include "put_bits.h"
>
> -typedef struct ASV1Context {
> +typedef struct ASVCommonContext {
> AVCodecContext *avctx;
> - BlockDSPContext bdsp;
> BswapDSPContext bbdsp;
> - FDCTDSPContext fdsp;
> - IDCTDSPContext idsp;
> - PixblockDSPContext pdsp;
> - PutBitContext pb;
> - GetBitContext gb;
> - ScanTable scantable;
> - int inv_qscale;
> int mb_width;
> int mb_height;
> int mb_width2;
> int mb_height2;
> - DECLARE_ALIGNED(32, int16_t, block)[6][64];
> - uint16_t intra_matrix[64];
> - int q_intra_matrix[64];
> - uint8_t *bitstream_buffer;
> - unsigned int bitstream_buffer_size;
> -} ASV1Context;
> +} ASVCommonContext;
>
> extern const uint8_t ff_asv_scantab[64];
> extern const uint8_t ff_asv_ccp_tab[17][2];
> diff --git a/libavcodec/asvdec.c b/libavcodec/asvdec.c
> index 4ca370d1ec..81260058fc 100644
> --- a/libavcodec/asvdec.c
> +++ b/libavcodec/asvdec.c
> @@ -25,6 +25,7 @@
>
> #include "libavutil/attributes.h"
> #include "libavutil/mem.h"
> +#include "libavutil/mem_internal.h"
> #include "libavutil/thread.h"
>
> #include "asv.h"
> @@ -33,6 +34,7 @@
> #include "codec_internal.h"
> #include "config_components.h"
> #include "decode.h"
> +#include "get_bits.h"
> #include "idctdsp.h"
> #include "mpeg12data.h"
>
> @@ -48,6 +50,20 @@ static VLC dc_ccp_vlc;
> static VLC ac_ccp_vlc;
> static VLC asv2_level_vlc;
>
> +typedef struct ASVDecContext {
> + ASVCommonContext c;
> +
> + GetBitContext gb;
> +
> + BlockDSPContext bdsp;
> + IDCTDSPContext idsp;
> + ScanTable scantable;
> + DECLARE_ALIGNED(32, int16_t, block)[6][64];
> + uint16_t intra_matrix[64];
> + uint8_t *bitstream_buffer;
> + unsigned int bitstream_buffer_size;
> +} ASVDecContext;
> +
> static av_cold void init_vlcs(void)
> {
> INIT_VLC_STATIC(&ccp_vlc, CCP_VLC_BITS, 17,
> @@ -106,7 +122,7 @@ static inline int asv2_get_level(GetBitContext *gb)
> return code - 31;
> }
>
> -static inline int asv1_decode_block(ASV1Context *a, int16_t block[64])
> +static inline int asv1_decode_block(ASVDecContext *a, int16_t block[64])
> {
> int i;
>
> @@ -119,7 +135,7 @@ static inline int asv1_decode_block(ASV1Context *a, int16_t block[64])
> if (ccp == 16)
> break;
> if (ccp < 0 || i >= 10) {
> - av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
> + av_log(a->c.avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
> return AVERROR_INVALIDDATA;
> }
>
> @@ -137,7 +153,7 @@ static inline int asv1_decode_block(ASV1Context *a, int16_t block[64])
> return 0;
> }
>
> -static inline int asv2_decode_block(ASV1Context *a, int16_t block[64])
> +static inline int asv2_decode_block(ASVDecContext *a, int16_t block[64])
> {
> int i, count, ccp;
>
> @@ -173,13 +189,13 @@ static inline int asv2_decode_block(ASV1Context *a, int16_t block[64])
> return 0;
> }
>
> -static inline int decode_mb(ASV1Context *a, int16_t block[6][64])
> +static inline int decode_mb(ASVDecContext *a, int16_t block[6][64])
> {
> int i, ret;
>
> a->bdsp.clear_blocks(block[0]);
>
> - if (a->avctx->codec_id == AV_CODEC_ID_ASV1) {
> + if (a->c.avctx->codec_id == AV_CODEC_ID_ASV1) {
> for (i = 0; i < 6; i++) {
> if ((ret = asv1_decode_block(a, block[i])) < 0)
> return ret;
> @@ -193,7 +209,7 @@ static inline int decode_mb(ASV1Context *a, int16_t block[6][64])
> return 0;
> }
>
> -static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
> +static inline void idct_put(ASVDecContext *a, AVFrame *frame, int mb_x, int mb_y)
> {
> int16_t(*block)[64] = a->block;
> int linesize = frame->linesize[0];
> @@ -207,7 +223,7 @@ static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
> a->idsp.idct_put(dest_y + 8 * linesize, linesize, block[2]);
> a->idsp.idct_put(dest_y + 8 * linesize + 8, linesize, block[3]);
>
> - if (!(a->avctx->flags & AV_CODEC_FLAG_GRAY)) {
> + if (!(a->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
> a->idsp.idct_put(dest_cb, frame->linesize[1], block[4]);
> a->idsp.idct_put(dest_cr, frame->linesize[2], block[5]);
> }
> @@ -216,12 +232,13 @@ static inline void idct_put(ASV1Context *a, AVFrame *frame, int mb_x, int mb_y)
> static int decode_frame(AVCodecContext *avctx, AVFrame *p,
> int *got_frame, AVPacket *avpkt)
> {
> - ASV1Context *const a = avctx->priv_data;
> + ASVDecContext *const a = avctx->priv_data;
> + const ASVCommonContext *const c = &a->c;
> const uint8_t *buf = avpkt->data;
> int buf_size = avpkt->size;
> - int mb_x, mb_y, ret;
> + int ret;
>
> - if (buf_size * 8LL < a->mb_height * a->mb_width * 13LL)
> + if (buf_size * 8LL < c->mb_height * c->mb_width * 13LL)
> return AVERROR_INVALIDDATA;
>
> if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
> @@ -235,7 +252,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
> if (!a->bitstream_buffer)
> return AVERROR(ENOMEM);
>
> - a->bbdsp.bswap_buf((uint32_t *) a->bitstream_buffer,
> + c->bbdsp.bswap_buf((uint32_t *) a->bitstream_buffer,
> (const uint32_t *) buf, buf_size / 4);
> ret = init_get_bits8(&a->gb, a->bitstream_buffer, buf_size);
> } else {
> @@ -244,8 +261,8 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
> if (ret < 0)
> return ret;
>
> - for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
> - for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
> + for (int mb_y = 0; mb_y < c->mb_height2; mb_y++) {
> + for (int mb_x = 0; mb_x < c->mb_width2; mb_x++) {
> if ((ret = decode_mb(a, a->block)) < 0)
> return ret;
>
> @@ -253,9 +270,9 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
> }
> }
>
> - if (a->mb_width2 != a->mb_width) {
> - mb_x = a->mb_width2;
> - for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
> + if (c->mb_width2 != c->mb_width) {
> + int mb_x = c->mb_width2;
> + for (int mb_y = 0; mb_y < c->mb_height2; mb_y++) {
> if ((ret = decode_mb(a, a->block)) < 0)
> return ret;
>
> @@ -263,9 +280,9 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
> }
> }
>
> - if (a->mb_height2 != a->mb_height) {
> - mb_y = a->mb_height2;
> - for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
> + if (c->mb_height2 != c->mb_height) {
> + int mb_y = c->mb_height2;
> + for (int mb_x = 0; mb_x < c->mb_width; mb_x++) {
> if ((ret = decode_mb(a, a->block)) < 0)
> return ret;
>
> @@ -283,8 +300,9 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
> static av_cold int decode_init(AVCodecContext *avctx)
> {
> static AVOnce init_static_once = AV_ONCE_INIT;
> - ASV1Context *const a = avctx->priv_data;
> + ASVDecContext *const a = avctx->priv_data;
> const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
> + int inv_qscale;
> int i;
>
> if (avctx->extradata_size < 1) {
> @@ -297,19 +315,19 @@ static av_cold int decode_init(AVCodecContext *avctx)
> ff_init_scantable(a->idsp.idct_permutation, &a->scantable, ff_asv_scantab);
> avctx->pix_fmt = AV_PIX_FMT_YUV420P;
>
> - if (avctx->extradata_size < 1 || (a->inv_qscale = avctx->extradata[0]) == 0) {
> + if (avctx->extradata_size < 1 || (inv_qscale = avctx->extradata[0]) == 0) {
> av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
> if (avctx->codec_id == AV_CODEC_ID_ASV1)
> - a->inv_qscale = 6;
> + inv_qscale = 6;
> else
> - a->inv_qscale = 10;
> + inv_qscale = 10;
> }
>
> for (i = 0; i < 64; i++) {
> int index = ff_asv_scantab[i];
>
> a->intra_matrix[i] = 64 * scale * ff_mpeg1_default_intra_matrix[index] /
> - a->inv_qscale;
> + inv_qscale;
> }
>
> ff_thread_once(&init_static_once, init_vlcs);
> @@ -319,7 +337,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
>
> static av_cold int decode_end(AVCodecContext *avctx)
> {
> - ASV1Context *const a = avctx->priv_data;
> + ASVDecContext *const a = avctx->priv_data;
>
> av_freep(&a->bitstream_buffer);
> a->bitstream_buffer_size = 0;
> @@ -333,7 +351,7 @@ const FFCodec ff_asv1_decoder = {
> CODEC_LONG_NAME("ASUS V1"),
> .p.type = AVMEDIA_TYPE_VIDEO,
> .p.id = AV_CODEC_ID_ASV1,
> - .priv_data_size = sizeof(ASV1Context),
> + .priv_data_size = sizeof(ASVDecContext),
> .init = decode_init,
> .close = decode_end,
> FF_CODEC_DECODE_CB(decode_frame),
> @@ -347,7 +365,7 @@ const FFCodec ff_asv2_decoder = {
> CODEC_LONG_NAME("ASUS V2"),
> .p.type = AVMEDIA_TYPE_VIDEO,
> .p.id = AV_CODEC_ID_ASV2,
> - .priv_data_size = sizeof(ASV1Context),
> + .priv_data_size = sizeof(ASVDecContext),
> .init = decode_init,
> FF_CODEC_DECODE_CB(decode_frame),
> .p.capabilities = AV_CODEC_CAP_DR1,
> diff --git a/libavcodec/asvenc.c b/libavcodec/asvenc.c
> index 25ea96e64e..e185d501b3 100644
> --- a/libavcodec/asvenc.c
> +++ b/libavcodec/asvenc.c
> @@ -27,6 +27,7 @@
>
> #include "libavutil/attributes.h"
> #include "libavutil/mem.h"
> +#include "libavutil/mem_internal.h"
>
> #include "aandcttab.h"
> #include "asv.h"
> @@ -36,6 +37,19 @@
> #include "encode.h"
> #include "fdctdsp.h"
> #include "mpeg12data.h"
> +#include "pixblockdsp.h"
> +#include "put_bits.h"
> +
> +typedef struct ASVEncContext {
> + ASVCommonContext c;
> +
> + PutBitContext pb;
> +
> + PixblockDSPContext pdsp;
> + FDCTDSPContext fdsp;
> + DECLARE_ALIGNED(32, int16_t, block)[6][64];
> + int q_intra_matrix[64];
> +} ASVEncContext;
>
> static inline void asv1_put_level(PutBitContext *pb, int level)
> {
> @@ -49,7 +63,7 @@ static inline void asv1_put_level(PutBitContext *pb, int level)
> }
> }
>
> -static inline void asv2_put_level(ASV1Context *a, PutBitContext *pb, int level)
> +static inline void asv2_put_level(ASVEncContext *a, PutBitContext *pb, int level)
> {
> unsigned int index = level + 31;
>
> @@ -58,14 +72,14 @@ static inline void asv2_put_level(ASV1Context *a, PutBitContext *pb, int level)
> } else {
> put_bits_le(pb, 5, 0); /* Escape code */
> if (level < -128 || level > 127) {
> - av_log(a->avctx, AV_LOG_WARNING, "Clipping level %d, increase qscale\n", level);
> + av_log(a->c.avctx, AV_LOG_WARNING, "Clipping level %d, increase qscale\n", level);
> level = av_clip_int8(level);
> }
> put_bits_le(pb, 8, level & 0xFF);
> }
> }
>
> -static inline void asv1_encode_block(ASV1Context *a, int16_t block[64])
> +static inline void asv1_encode_block(ASVEncContext *a, int16_t block[64])
> {
> int i;
> int nc_count = 0;
> @@ -111,7 +125,7 @@ static inline void asv1_encode_block(ASV1Context *a, int16_t block[64])
> put_bits(&a->pb, 5, 0xF); /* End of block */
> }
>
> -static inline void asv2_encode_block(ASV1Context *a, int16_t block[64])
> +static inline void asv2_encode_block(ASVEncContext *a, int16_t block[64])
> {
> int i;
> int count = 0;
> @@ -166,13 +180,13 @@ static inline void asv2_encode_block(ASV1Context *a, int16_t block[64])
>
> #define MAX_MB_SIZE (30 * 16 * 16 * 3 / 2 / 8)
>
> -static inline int encode_mb(ASV1Context *a, int16_t block[6][64])
> +static inline int encode_mb(ASVEncContext *a, int16_t block[6][64])
> {
> int i;
>
> av_assert0(put_bytes_left(&a->pb, 0) >= MAX_MB_SIZE);
>
> - if (a->avctx->codec_id == AV_CODEC_ID_ASV1) {
> + if (a->c.avctx->codec_id == AV_CODEC_ID_ASV1) {
> for (i = 0; i < 6; i++)
> asv1_encode_block(a, block[i]);
> } else {
> @@ -183,7 +197,7 @@ static inline int encode_mb(ASV1Context *a, int16_t block[6][64])
> return 0;
> }
>
> -static inline void dct_get(ASV1Context *a, const AVFrame *frame,
> +static inline void dct_get(ASVEncContext *a, const AVFrame *frame,
> int mb_x, int mb_y)
> {
> int16_t (*block)[64] = a->block;
> @@ -201,7 +215,7 @@ static inline void dct_get(ASV1Context *a, const AVFrame *frame,
> for (i = 0; i < 4; i++)
> a->fdsp.fdct(block[i]);
>
> - if (!(a->avctx->flags & AV_CODEC_FLAG_GRAY)) {
> + if (!(a->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
> a->pdsp.get_pixels(block[4], ptr_cb, frame->linesize[1]);
> a->pdsp.get_pixels(block[5], ptr_cr, frame->linesize[2]);
> for (i = 4; i < 6; i++)
> @@ -212,9 +226,9 @@ static inline void dct_get(ASV1Context *a, const AVFrame *frame,
> static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> const AVFrame *pict, int *got_packet)
> {
> - ASV1Context *const a = avctx->priv_data;
> + ASVEncContext *const a = avctx->priv_data;
> + const ASVCommonContext *const c = &a->c;
> int size, ret;
> - int mb_x, mb_y;
>
> if (pict->width % 16 || pict->height % 16) {
> AVFrame *clone = av_frame_alloc();
> @@ -258,30 +272,30 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> return ret;
> }
>
> - if ((ret = ff_alloc_packet(avctx, pkt, a->mb_height * a->mb_width * MAX_MB_SIZE +
> + if ((ret = ff_alloc_packet(avctx, pkt, c->mb_height * c->mb_width * MAX_MB_SIZE +
> AV_INPUT_BUFFER_MIN_SIZE)) < 0)
> return ret;
>
> init_put_bits(&a->pb, pkt->data, pkt->size);
>
> - for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
> - for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
> + for (int mb_y = 0; mb_y < c->mb_height2; mb_y++) {
> + for (int mb_x = 0; mb_x < c->mb_width2; mb_x++) {
> dct_get(a, pict, mb_x, mb_y);
> encode_mb(a, a->block);
> }
> }
>
> - if (a->mb_width2 != a->mb_width) {
> - mb_x = a->mb_width2;
> - for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
> + if (c->mb_width2 != c->mb_width) {
> + int mb_x = c->mb_width2;
> + for (int mb_y = 0; mb_y < c->mb_height2; mb_y++) {
> dct_get(a, pict, mb_x, mb_y);
> encode_mb(a, a->block);
> }
> }
>
> - if (a->mb_height2 != a->mb_height) {
> - mb_y = a->mb_height2;
> - for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
> + if (c->mb_height2 != c->mb_height) {
> + int mb_y = c->mb_height2;
> + for (int mb_x = 0; mb_x < c->mb_width; mb_x++) {
> dct_get(a, pict, mb_x, mb_y);
> encode_mb(a, a->block);
> }
> @@ -296,7 +310,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> size = (put_bytes_output(&a->pb) + 3) / 4;
>
> if (avctx->codec_id == AV_CODEC_ID_ASV1) {
> - a->bbdsp.bswap_buf((uint32_t *) pkt->data,
> + c->bbdsp.bswap_buf((uint32_t *) pkt->data,
> (uint32_t *) pkt->data, size);
> }
>
> @@ -308,9 +322,10 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
>
> static av_cold int encode_init(AVCodecContext *avctx)
> {
> - ASV1Context *const a = avctx->priv_data;
> + ASVEncContext *const a = avctx->priv_data;
> int i;
> const int scale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
> + int inv_qscale;
>
> ff_asv_common_init(avctx);
> ff_fdctdsp_init(&a->fdsp, avctx);
> @@ -319,23 +334,23 @@ static av_cold int encode_init(AVCodecContext *avctx)
> if (avctx->global_quality <= 0)
> avctx->global_quality = 4 * FF_QUALITY_SCALE;
>
> - a->inv_qscale = (32 * scale * FF_QUALITY_SCALE +
> + inv_qscale = (32 * scale * FF_QUALITY_SCALE +
> avctx->global_quality / 2) / avctx->global_quality;
>
> avctx->extradata = av_mallocz(8);
> if (!avctx->extradata)
> return AVERROR(ENOMEM);
> avctx->extradata_size = 8;
> - ((uint32_t *) avctx->extradata)[0] = av_le2ne32(a->inv_qscale);
> + AV_WLA(32, avctx->extradata, inv_qscale);
> ((uint32_t *) avctx->extradata)[1] = av_le2ne32(AV_RL32("ASUS"));
>
> for (i = 0; i < 64; i++) {
> if (a->fdsp.fdct == ff_fdct_ifast) {
> int q = 32LL * scale * ff_mpeg1_default_intra_matrix[i] * ff_aanscales[i];
> - a->q_intra_matrix[i] = (((int64_t)a->inv_qscale << 30) + q / 2) / q;
> + a->q_intra_matrix[i] = (((int64_t)inv_qscale << 30) + q / 2) / q;
> } else {
> int q = 32 * scale * ff_mpeg1_default_intra_matrix[i];
> - a->q_intra_matrix[i] = ((a->inv_qscale << 16) + q / 2) / q;
> + a->q_intra_matrix[i] = ((inv_qscale << 16) + q / 2) / q;
> }
> }
>
> @@ -349,7 +364,7 @@ const FFCodec ff_asv1_encoder = {
> .p.type = AVMEDIA_TYPE_VIDEO,
> .p.id = AV_CODEC_ID_ASV1,
> .p.capabilities = AV_CODEC_CAP_DR1,
> - .priv_data_size = sizeof(ASV1Context),
> + .priv_data_size = sizeof(ASVEncContext),
> .init = encode_init,
> FF_CODEC_ENCODE_CB(encode_frame),
> .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
> @@ -364,7 +379,7 @@ const FFCodec ff_asv2_encoder = {
> .p.type = AVMEDIA_TYPE_VIDEO,
> .p.id = AV_CODEC_ID_ASV2,
> .p.capabilities = AV_CODEC_CAP_DR1,
> - .priv_data_size = sizeof(ASV1Context),
> + .priv_data_size = sizeof(ASVEncContext),
> .init = encode_init,
> FF_CODEC_ENCODE_CB(encode_frame),
> .p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
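For anyone skimming the patch, the structural point is small: the fields
shared by both sides move into ASVCommonContext, and each of the new
per-codec contexts embeds it as its first member, so ff_asv_common_init()
can keep casting avctx->priv_data to the common type no matter which codec
allocated the private data. A condensed sketch of that layout (field lists
abbreviated here; the full structs are in the diff above):

    typedef struct ASVCommonContext {
        AVCodecContext *avctx;
        BswapDSPContext bbdsp;
        int mb_width, mb_height, mb_width2, mb_height2;
    } ASVCommonContext;

    typedef struct ASVDecContext {
        ASVCommonContext c;   /* must stay the first member */
        GetBitContext    gb;
        /* decoder-only DSP contexts, block and bitstream buffers, ... */
    } ASVDecContext;

    typedef struct ASVEncContext {
        ASVCommonContext c;   /* must stay the first member */
        PutBitContext    pb;
        /* encoder-only DSP contexts, block buffer, quant matrix, ... */
    } ASVEncContext;

    av_cold void ff_asv_common_init(AVCodecContext *avctx)
    {
        /* valid for both ASVDecContext and ASVEncContext because the
         * common context sits at offset 0 in either private context */
        ASVCommonContext *const a = avctx->priv_data;

        ff_bswapdsp_init(&a->bbdsp);
        /* ... rest of the shared init unchanged ... */
    }

A side effect worth noting: inv_qscale is only needed while the
(de)quantization tables are being built, so it becomes a local variable in
decode_init() and encode_init() instead of living in the context.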
Will apply this patch tomorrow unless there are objections.
- Andreas