[FFmpeg-cvslog] avcodec/aptx: split decoder and encoder into separate files
James Almer
git at videolan.org
Thu Feb 6 03:58:35 EET 2020
ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Sun Dec 8 11:58:18 2019 -0300| [2383021a7a1ca0456e93440539349cc918c77a73] | committer: James Almer
avcodec/aptx: split decoder and encoder into separate files
Signed-off-by: James Almer <jamrial at gmail.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=2383021a7a1ca0456e93440539349cc918c77a73
---
libavcodec/Makefile | 8 +-
libavcodec/aptx.c | 639 +--------------------------------------------------
libavcodec/aptx.h | 220 ++++++++++++++++++
libavcodec/aptxdec.c | 204 ++++++++++++++++
libavcodec/aptxenc.c | 278 ++++++++++++++++++++++
5 files changed, 712 insertions(+), 637 deletions(-)
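The refactor keeps the bit-exact DSP core in aptx.c, collects its shared declarations in the new aptx.h, and leaves only the frame-level decode/encode paths in aptxdec.c and aptxenc.c. As a rough orientation before the full diff, here is a schematic of the new boundaries (a simplified sketch with opaque placeholder types, not the literal header contents):

    /* aptx.h -- shared interface (sketch; the real header defines the types) */
    typedef struct Channel Channel;               /* per-channel aptX state   */
    typedef struct AVCodecContext AVCodecContext; /* FFmpeg codec context     */

    int  ff_aptx_init(AVCodecContext *avctx);              /* common init     */
    void ff_aptx_generate_dither(Channel *channel);        /* shared dither   */
    void ff_aptx_invert_quantize_and_prediction(Channel *channel, int hd);

    /* aptxdec.c: QMF synthesis, codeword unpacking, aptx_decode_frame(),
     *            ff_aptx_decoder / ff_aptx_hd_decoder                        */
    /* aptxenc.c: QMF analysis, quantization, codeword packing,
     *            aptx_encode_frame(), ff_aptx_encoder / ff_aptx_hd_encoder   */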
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 9f9b7db54e..55899194e2 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -194,10 +194,10 @@ OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpegenc_common.o \
OBJS-$(CONFIG_ANM_DECODER) += anm.o
OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
OBJS-$(CONFIG_APE_DECODER) += apedec.o
-OBJS-$(CONFIG_APTX_DECODER) += aptx.o
-OBJS-$(CONFIG_APTX_ENCODER) += aptx.o
-OBJS-$(CONFIG_APTX_HD_DECODER) += aptx.o
-OBJS-$(CONFIG_APTX_HD_ENCODER) += aptx.o
+OBJS-$(CONFIG_APTX_DECODER) += aptxdec.o aptx.o
+OBJS-$(CONFIG_APTX_ENCODER) += aptxenc.o aptx.o
+OBJS-$(CONFIG_APTX_HD_DECODER) += aptxdec.o aptx.o
+OBJS-$(CONFIG_APTX_HD_ENCODER) += aptxenc.o aptx.o
OBJS-$(CONFIG_APNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_APNG_ENCODER) += png.o pngenc.o
OBJS-$(CONFIG_ARBC_DECODER) += arbc.o
diff --git a/libavcodec/aptx.c b/libavcodec/aptx.c
index a2620a9212..3aeee1907c 100644
--- a/libavcodec/aptx.c
+++ b/libavcodec/aptx.c
@@ -20,81 +20,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavutil/intreadwrite.h"
-#include "avcodec.h"
-#include "internal.h"
-#include "mathops.h"
-#include "audio_frame_queue.h"
-
-
-enum channels {
- LEFT,
- RIGHT,
- NB_CHANNELS
-};
-
-enum subbands {
- LF, // Low Frequency (0-5.5 kHz)
- MLF, // Medium-Low Frequency (5.5-11kHz)
- MHF, // Medium-High Frequency (11-16.5kHz)
- HF, // High Frequency (16.5-22kHz)
- NB_SUBBANDS
-};
-
-#define NB_FILTERS 2
-#define FILTER_TAPS 16
-
-typedef struct {
- int pos;
- int32_t buffer[2*FILTER_TAPS];
-} FilterSignal;
-
-typedef struct {
- FilterSignal outer_filter_signal[NB_FILTERS];
- FilterSignal inner_filter_signal[NB_FILTERS][NB_FILTERS];
-} QMFAnalysis;
-
-typedef struct {
- int32_t quantized_sample;
- int32_t quantized_sample_parity_change;
- int32_t error;
-} Quantize;
-
-typedef struct {
- int32_t quantization_factor;
- int32_t factor_select;
- int32_t reconstructed_difference;
-} InvertQuantize;
-
-typedef struct {
- int32_t prev_sign[2];
- int32_t s_weight[2];
- int32_t d_weight[24];
- int32_t pos;
- int32_t reconstructed_differences[48];
- int32_t previous_reconstructed_sample;
- int32_t predicted_difference;
- int32_t predicted_sample;
-} Prediction;
-
-typedef struct {
- int32_t codeword_history;
- int32_t dither_parity;
- int32_t dither[NB_SUBBANDS];
-
- QMFAnalysis qmf;
- Quantize quantize[NB_SUBBANDS];
- InvertQuantize invert_quantize[NB_SUBBANDS];
- Prediction prediction[NB_SUBBANDS];
-} Channel;
-
-typedef struct {
- int hd;
- int block_size;
- int32_t sync_idx;
- Channel channels[NB_CHANNELS];
- AudioFrameQueue afq;
-} AptXContext;
+#include "aptx.h"
static const int32_t quantize_intervals_LF[65] = {
@@ -383,17 +309,7 @@ static const int16_t hd_quantize_factor_select_offset_HF[17] = {
70, 90, 115, 147, 192, 264, 398, 521, 521,
};
-typedef const struct {
- const int32_t *quantize_intervals;
- const int32_t *invert_quantize_dither_factors;
- const int32_t *quantize_dither_factors;
- const int16_t *quantize_factor_select_offset;
- int tables_size;
- int32_t factor_max;
- int32_t prediction_order;
-} ConstTables;
-
-static ConstTables tables[2][NB_SUBBANDS] = {
+ConstTables ff_aptx_quant_tables[2][NB_SUBBANDS] = {
{
[LF] = { quantize_intervals_LF,
invert_quantize_dither_factors_LF,
@@ -456,24 +372,6 @@ static const int16_t quantization_factors[32] = {
};
-/* Rounded right shift with optionnal clipping */
-#define RSHIFT_SIZE(size) \
-av_always_inline \
-static int##size##_t rshift##size(int##size##_t value, int shift) \
-{ \
- int##size##_t rounding = (int##size##_t)1 << (shift - 1); \
- int##size##_t mask = ((int##size##_t)1 << (shift + 1)) - 1; \
- return ((value + rounding) >> shift) - ((value & mask) == rounding); \
-} \
-av_always_inline \
-static int##size##_t rshift##size##_clip24(int##size##_t value, int shift) \
-{ \
- return av_clip_intp2(rshift##size(value, shift), 23); \
-}
-RSHIFT_SIZE(32)
-RSHIFT_SIZE(64)
-
-
av_always_inline
static void aptx_update_codeword_history(Channel *channel)
{
@@ -483,7 +381,7 @@ static void aptx_update_codeword_history(Channel *channel)
channel->codeword_history = (cw << 8) + ((unsigned)channel->codeword_history << 4);
}
-static void aptx_generate_dither(Channel *channel)
+void ff_aptx_generate_dither(Channel *channel)
{
int subband;
int64_t m;
@@ -498,256 +396,6 @@ static void aptx_generate_dither(Channel *channel)
channel->dither_parity = (d >> 25) & 1;
}
-/*
- * Convolution filter coefficients for the outer QMF of the QMF tree.
- * The 2 sets are a mirror of each other.
- */
-static const int32_t aptx_qmf_outer_coeffs[NB_FILTERS][FILTER_TAPS] = {
- {
- 730, -413, -9611, 43626, -121026, 269973, -585547, 2801966,
- 697128, -160481, 27611, 8478, -10043, 3511, 688, -897,
- },
- {
- -897, 688, 3511, -10043, 8478, 27611, -160481, 697128,
- 2801966, -585547, 269973, -121026, 43626, -9611, -413, 730,
- },
-};
-
-/*
- * Convolution filter coefficients for the inner QMF of the QMF tree.
- * The 2 sets are a mirror of each other.
- */
-static const int32_t aptx_qmf_inner_coeffs[NB_FILTERS][FILTER_TAPS] = {
- {
- 1033, -584, -13592, 61697, -171156, 381799, -828088, 3962579,
- 985888, -226954, 39048, 11990, -14203, 4966, 973, -1268,
- },
- {
- -1268, 973, 4966, -14203, 11990, 39048, -226954, 985888,
- 3962579, -828088, 381799, -171156, 61697, -13592, -584, 1033,
- },
-};
-
-/*
- * Push one sample into a circular signal buffer.
- */
-av_always_inline
-static void aptx_qmf_filter_signal_push(FilterSignal *signal, int32_t sample)
-{
- signal->buffer[signal->pos ] = sample;
- signal->buffer[signal->pos+FILTER_TAPS] = sample;
- signal->pos = (signal->pos + 1) & (FILTER_TAPS - 1);
-}
-
-/*
- * Compute the convolution of the signal with the coefficients, and reduce
- * to 24 bits by applying the specified right shifting.
- */
-av_always_inline
-static int32_t aptx_qmf_convolution(FilterSignal *signal,
- const int32_t coeffs[FILTER_TAPS],
- int shift)
-{
- int32_t *sig = &signal->buffer[signal->pos];
- int64_t e = 0;
- int i;
-
- for (i = 0; i < FILTER_TAPS; i++)
- e += MUL64(sig[i], coeffs[i]);
-
- return rshift64_clip24(e, shift);
-}
-
-/*
- * Half-band QMF analysis filter realized with a polyphase FIR filter.
- * Split into 2 subbands and downsample by 2.
- * So for each pair of samples that goes in, one sample goes out,
- * split into 2 separate subbands.
- */
-av_always_inline
-static void aptx_qmf_polyphase_analysis(FilterSignal signal[NB_FILTERS],
- const int32_t coeffs[NB_FILTERS][FILTER_TAPS],
- int shift,
- int32_t samples[NB_FILTERS],
- int32_t *low_subband_output,
- int32_t *high_subband_output)
-{
- int32_t subbands[NB_FILTERS];
- int i;
-
- for (i = 0; i < NB_FILTERS; i++) {
- aptx_qmf_filter_signal_push(&signal[i], samples[NB_FILTERS-1-i]);
- subbands[i] = aptx_qmf_convolution(&signal[i], coeffs[i], shift);
- }
-
- *low_subband_output = av_clip_intp2(subbands[0] + subbands[1], 23);
- *high_subband_output = av_clip_intp2(subbands[0] - subbands[1], 23);
-}
-
-/*
- * Two stage QMF analysis tree.
- * Split 4 input samples into 4 subbands and downsample by 4.
- * So for each group of 4 samples that goes in, one sample goes out,
- * split into 4 separate subbands.
- */
-static void aptx_qmf_tree_analysis(QMFAnalysis *qmf,
- int32_t samples[4],
- int32_t subband_samples[4])
-{
- int32_t intermediate_samples[4];
- int i;
-
- /* Split 4 input samples into 2 intermediate subbands downsampled to 2 samples */
- for (i = 0; i < 2; i++)
- aptx_qmf_polyphase_analysis(qmf->outer_filter_signal,
- aptx_qmf_outer_coeffs, 23,
- &samples[2*i],
- &intermediate_samples[0+i],
- &intermediate_samples[2+i]);
-
- /* Split 2 intermediate subband samples into 4 final subbands downsampled to 1 sample */
- for (i = 0; i < 2; i++)
- aptx_qmf_polyphase_analysis(qmf->inner_filter_signal[i],
- aptx_qmf_inner_coeffs, 23,
- &intermediate_samples[2*i],
- &subband_samples[2*i+0],
- &subband_samples[2*i+1]);
-}
-
-/*
- * Half-band QMF synthesis filter realized with a polyphase FIR filter.
- * Join 2 subbands and upsample by 2.
- * So for each 2 subbands sample that goes in, a pair of samples goes out.
- */
-av_always_inline
-static void aptx_qmf_polyphase_synthesis(FilterSignal signal[NB_FILTERS],
- const int32_t coeffs[NB_FILTERS][FILTER_TAPS],
- int shift,
- int32_t low_subband_input,
- int32_t high_subband_input,
- int32_t samples[NB_FILTERS])
-{
- int32_t subbands[NB_FILTERS];
- int i;
-
- subbands[0] = low_subband_input + high_subband_input;
- subbands[1] = low_subband_input - high_subband_input;
-
- for (i = 0; i < NB_FILTERS; i++) {
- aptx_qmf_filter_signal_push(&signal[i], subbands[1-i]);
- samples[i] = aptx_qmf_convolution(&signal[i], coeffs[i], shift);
- }
-}
-
-/*
- * Two stage QMF synthesis tree.
- * Join 4 subbands and upsample by 4.
- * So for each 4 subbands sample that goes in, a group of 4 samples goes out.
- */
-static void aptx_qmf_tree_synthesis(QMFAnalysis *qmf,
- int32_t subband_samples[4],
- int32_t samples[4])
-{
- int32_t intermediate_samples[4];
- int i;
-
- /* Join 4 subbands into 2 intermediate subbands upsampled to 2 samples. */
- for (i = 0; i < 2; i++)
- aptx_qmf_polyphase_synthesis(qmf->inner_filter_signal[i],
- aptx_qmf_inner_coeffs, 22,
- subband_samples[2*i+0],
- subband_samples[2*i+1],
- &intermediate_samples[2*i]);
-
- /* Join 2 samples from intermediate subbands upsampled to 4 samples. */
- for (i = 0; i < 2; i++)
- aptx_qmf_polyphase_synthesis(qmf->outer_filter_signal,
- aptx_qmf_outer_coeffs, 21,
- intermediate_samples[0+i],
- intermediate_samples[2+i],
- &samples[2*i]);
-}
-
-
-av_always_inline
-static int32_t aptx_bin_search(int32_t value, int32_t factor,
- const int32_t *intervals, int32_t nb_intervals)
-{
- int32_t idx = 0;
- int i;
-
- for (i = nb_intervals >> 1; i > 0; i >>= 1)
- if (MUL64(factor, intervals[idx + i]) <= ((int64_t)value << 24))
- idx += i;
-
- return idx;
-}
-
-static void aptx_quantize_difference(Quantize *quantize,
- int32_t sample_difference,
- int32_t dither,
- int32_t quantization_factor,
- ConstTables *tables)
-{
- const int32_t *intervals = tables->quantize_intervals;
- int32_t quantized_sample, dithered_sample, parity_change;
- int32_t d, mean, interval, inv, sample_difference_abs;
- int64_t error;
-
- sample_difference_abs = FFABS(sample_difference);
- sample_difference_abs = FFMIN(sample_difference_abs, (1 << 23) - 1);
-
- quantized_sample = aptx_bin_search(sample_difference_abs >> 4,
- quantization_factor,
- intervals, tables->tables_size);
-
- d = rshift32_clip24(MULH(dither, dither), 7) - (1 << 23);
- d = rshift64(MUL64(d, tables->quantize_dither_factors[quantized_sample]), 23);
-
- intervals += quantized_sample;
- mean = (intervals[1] + intervals[0]) / 2;
- interval = (intervals[1] - intervals[0]) * (-(sample_difference < 0) | 1);
-
- dithered_sample = rshift64_clip24(MUL64(dither, interval) + ((int64_t)av_clip_intp2(mean + d, 23) << 32), 32);
- error = ((int64_t)sample_difference_abs << 20) - MUL64(dithered_sample, quantization_factor);
- quantize->error = FFABS(rshift64(error, 23));
-
- parity_change = quantized_sample;
- if (error < 0)
- quantized_sample--;
- else
- parity_change--;
-
- inv = -(sample_difference < 0);
- quantize->quantized_sample = quantized_sample ^ inv;
- quantize->quantized_sample_parity_change = parity_change ^ inv;
-}
-
-static void aptx_encode_channel(Channel *channel, int32_t samples[4], int hd)
-{
- int32_t subband_samples[4];
- int subband;
- aptx_qmf_tree_analysis(&channel->qmf, samples, subband_samples);
- aptx_generate_dither(channel);
- for (subband = 0; subband < NB_SUBBANDS; subband++) {
- int32_t diff = av_clip_intp2(subband_samples[subband] - channel->prediction[subband].predicted_sample, 23);
- aptx_quantize_difference(&channel->quantize[subband], diff,
- channel->dither[subband],
- channel->invert_quantize[subband].quantization_factor,
- &tables[hd][subband]);
- }
-}
-
-static void aptx_decode_channel(Channel *channel, int32_t samples[4])
-{
- int32_t subband_samples[4];
- int subband;
- for (subband = 0; subband < NB_SUBBANDS; subband++)
- subband_samples[subband] = channel->prediction[subband].previous_reconstructed_sample;
- aptx_qmf_tree_synthesis(&channel->qmf, subband_samples, samples);
-}
-
-
static void aptx_invert_quantization(InvertQuantize *invert_quantize,
int32_t quantized_sample, int32_t dither,
ConstTables *tables)
@@ -845,7 +493,7 @@ static void aptx_process_subband(InvertQuantize *invert_quantize,
tables->prediction_order);
}
-static void aptx_invert_quantize_and_prediction(Channel *channel, int hd)
+void ff_aptx_invert_quantize_and_prediction(Channel *channel, int hd)
{
int subband;
for (subband = 0; subband < NB_SUBBANDS; subband++)
@@ -853,138 +501,10 @@ static void aptx_invert_quantize_and_prediction(Channel *channel, int hd)
&channel->prediction[subband],
channel->quantize[subband].quantized_sample,
channel->dither[subband],
- &tables[hd][subband]);
-}
-
-static int32_t aptx_quantized_parity(Channel *channel)
-{
- int32_t parity = channel->dither_parity;
- int subband;
-
- for (subband = 0; subband < NB_SUBBANDS; subband++)
- parity ^= channel->quantize[subband].quantized_sample;
-
- return parity & 1;
-}
-
-/* For each sample, ensure that the parity of all subbands of all channels
- * is 0 except once every 8 samples where the parity is forced to 1. */
-static int aptx_check_parity(Channel channels[NB_CHANNELS], int32_t *idx)
-{
- int32_t parity = aptx_quantized_parity(&channels[LEFT])
- ^ aptx_quantized_parity(&channels[RIGHT]);
-
- int eighth = *idx == 7;
- *idx = (*idx + 1) & 7;
-
- return parity ^ eighth;
-}
-
-static void aptx_insert_sync(Channel channels[NB_CHANNELS], int32_t *idx)
-{
- if (aptx_check_parity(channels, idx)) {
- int i;
- Channel *c;
- static const int map[] = { 1, 2, 0, 3 };
- Quantize *min = &channels[NB_CHANNELS-1].quantize[map[0]];
- for (c = &channels[NB_CHANNELS-1]; c >= channels; c--)
- for (i = 0; i < NB_SUBBANDS; i++)
- if (c->quantize[map[i]].error < min->error)
- min = &c->quantize[map[i]];
-
- /* Forcing the desired parity is done by offsetting by 1 the quantized
- * sample from the subband featuring the smallest quantization error. */
- min->quantized_sample = min->quantized_sample_parity_change;
- }
-}
-
-static uint16_t aptx_pack_codeword(Channel *channel)
-{
- int32_t parity = aptx_quantized_parity(channel);
- return (((channel->quantize[3].quantized_sample & 0x06) | parity) << 13)
- | (((channel->quantize[2].quantized_sample & 0x03) ) << 11)
- | (((channel->quantize[1].quantized_sample & 0x0F) ) << 7)
- | (((channel->quantize[0].quantized_sample & 0x7F) ) << 0);
-}
-
-static uint32_t aptxhd_pack_codeword(Channel *channel)
-{
- int32_t parity = aptx_quantized_parity(channel);
- return (((channel->quantize[3].quantized_sample & 0x01E) | parity) << 19)
- | (((channel->quantize[2].quantized_sample & 0x00F) ) << 15)
- | (((channel->quantize[1].quantized_sample & 0x03F) ) << 9)
- | (((channel->quantize[0].quantized_sample & 0x1FF) ) << 0);
-}
-
-static void aptx_unpack_codeword(Channel *channel, uint16_t codeword)
-{
- channel->quantize[0].quantized_sample = sign_extend(codeword >> 0, 7);
- channel->quantize[1].quantized_sample = sign_extend(codeword >> 7, 4);
- channel->quantize[2].quantized_sample = sign_extend(codeword >> 11, 2);
- channel->quantize[3].quantized_sample = sign_extend(codeword >> 13, 3);
- channel->quantize[3].quantized_sample = (channel->quantize[3].quantized_sample & ~1)
- | aptx_quantized_parity(channel);
-}
-
-static void aptxhd_unpack_codeword(Channel *channel, uint32_t codeword)
-{
- channel->quantize[0].quantized_sample = sign_extend(codeword >> 0, 9);
- channel->quantize[1].quantized_sample = sign_extend(codeword >> 9, 6);
- channel->quantize[2].quantized_sample = sign_extend(codeword >> 15, 4);
- channel->quantize[3].quantized_sample = sign_extend(codeword >> 19, 5);
- channel->quantize[3].quantized_sample = (channel->quantize[3].quantized_sample & ~1)
- | aptx_quantized_parity(channel);
-}
-
-static void aptx_encode_samples(AptXContext *ctx,
- int32_t samples[NB_CHANNELS][4],
- uint8_t *output)
-{
- int channel;
- for (channel = 0; channel < NB_CHANNELS; channel++)
- aptx_encode_channel(&ctx->channels[channel], samples[channel], ctx->hd);
-
- aptx_insert_sync(ctx->channels, &ctx->sync_idx);
-
- for (channel = 0; channel < NB_CHANNELS; channel++) {
- aptx_invert_quantize_and_prediction(&ctx->channels[channel], ctx->hd);
- if (ctx->hd)
- AV_WB24(output + 3*channel,
- aptxhd_pack_codeword(&ctx->channels[channel]));
- else
- AV_WB16(output + 2*channel,
- aptx_pack_codeword(&ctx->channels[channel]));
- }
+ &ff_aptx_quant_tables[hd][subband]);
}
-static int aptx_decode_samples(AptXContext *ctx,
- const uint8_t *input,
- int32_t samples[NB_CHANNELS][4])
-{
- int channel, ret;
-
- for (channel = 0; channel < NB_CHANNELS; channel++) {
- aptx_generate_dither(&ctx->channels[channel]);
-
- if (ctx->hd)
- aptxhd_unpack_codeword(&ctx->channels[channel],
- AV_RB24(input + 3*channel));
- else
- aptx_unpack_codeword(&ctx->channels[channel],
- AV_RB16(input + 2*channel));
- aptx_invert_quantize_and_prediction(&ctx->channels[channel], ctx->hd);
- }
-
- ret = aptx_check_parity(ctx->channels, &ctx->sync_idx);
-
- for (channel = 0; channel < NB_CHANNELS; channel++)
- aptx_decode_channel(&ctx->channels[channel], samples[channel]);
-
- return ret;
-}
-
-
-static av_cold int aptx_init(AVCodecContext *avctx)
+av_cold int ff_aptx_init(AVCodecContext *avctx)
{
AptXContext *s = avctx->priv_data;
int chan, subband;
@@ -1016,150 +536,3 @@ static av_cold int aptx_init(AVCodecContext *avctx)
ff_af_queue_init(avctx, &s->afq);
return 0;
}
-
-static int aptx_decode_frame(AVCodecContext *avctx, void *data,
- int *got_frame_ptr, AVPacket *avpkt)
-{
- AptXContext *s = avctx->priv_data;
- AVFrame *frame = data;
- int pos, opos, channel, sample, ret;
-
- if (avpkt->size < s->block_size) {
- av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
- return AVERROR_INVALIDDATA;
- }
-
- /* get output buffer */
- frame->channels = NB_CHANNELS;
- frame->format = AV_SAMPLE_FMT_S32P;
- frame->nb_samples = 4 * avpkt->size / s->block_size;
- if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
- return ret;
-
- for (pos = 0, opos = 0; opos < frame->nb_samples; pos += s->block_size, opos += 4) {
- int32_t samples[NB_CHANNELS][4];
-
- if (aptx_decode_samples(s, &avpkt->data[pos], samples)) {
- av_log(avctx, AV_LOG_ERROR, "Synchronization error\n");
- return AVERROR_INVALIDDATA;
- }
-
- for (channel = 0; channel < NB_CHANNELS; channel++)
- for (sample = 0; sample < 4; sample++)
- AV_WN32A(&frame->data[channel][4*(opos+sample)],
- samples[channel][sample] * 256);
- }
-
- *got_frame_ptr = 1;
- return s->block_size * frame->nb_samples / 4;
-}
-
-static int aptx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
- const AVFrame *frame, int *got_packet_ptr)
-{
- AptXContext *s = avctx->priv_data;
- int pos, ipos, channel, sample, output_size, ret;
-
- if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
- return ret;
-
- output_size = s->block_size * frame->nb_samples/4;
- if ((ret = ff_alloc_packet2(avctx, avpkt, output_size, 0)) < 0)
- return ret;
-
- for (pos = 0, ipos = 0; pos < output_size; pos += s->block_size, ipos += 4) {
- int32_t samples[NB_CHANNELS][4];
-
- for (channel = 0; channel < NB_CHANNELS; channel++)
- for (sample = 0; sample < 4; sample++)
- samples[channel][sample] = (int32_t)AV_RN32A(&frame->data[channel][4*(ipos+sample)]) >> 8;
-
- aptx_encode_samples(s, samples, avpkt->data + pos);
- }
-
- ff_af_queue_remove(&s->afq, frame->nb_samples, &avpkt->pts, &avpkt->duration);
- *got_packet_ptr = 1;
- return 0;
-}
-
-static av_cold int aptx_close(AVCodecContext *avctx)
-{
- AptXContext *s = avctx->priv_data;
- ff_af_queue_close(&s->afq);
- return 0;
-}
-
-
-#if CONFIG_APTX_DECODER
-AVCodec ff_aptx_decoder = {
- .name = "aptx",
- .long_name = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
- .type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_APTX,
- .priv_data_size = sizeof(AptXContext),
- .init = aptx_init,
- .decode = aptx_decode_frame,
- .close = aptx_close,
- .capabilities = AV_CODEC_CAP_DR1,
- .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
- .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
- .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
- AV_SAMPLE_FMT_NONE },
-};
-#endif
-
-#if CONFIG_APTX_HD_DECODER
-AVCodec ff_aptx_hd_decoder = {
- .name = "aptx_hd",
- .long_name = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
- .type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_APTX_HD,
- .priv_data_size = sizeof(AptXContext),
- .init = aptx_init,
- .decode = aptx_decode_frame,
- .close = aptx_close,
- .capabilities = AV_CODEC_CAP_DR1,
- .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
- .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
- .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
- AV_SAMPLE_FMT_NONE },
-};
-#endif
-
-#if CONFIG_APTX_ENCODER
-AVCodec ff_aptx_encoder = {
- .name = "aptx",
- .long_name = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
- .type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_APTX,
- .priv_data_size = sizeof(AptXContext),
- .init = aptx_init,
- .encode2 = aptx_encode_frame,
- .close = aptx_close,
- .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
- .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
- .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
- .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
- AV_SAMPLE_FMT_NONE },
- .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
-};
-#endif
-
-#if CONFIG_APTX_HD_ENCODER
-AVCodec ff_aptx_hd_encoder = {
- .name = "aptx_hd",
- .long_name = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
- .type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_APTX_HD,
- .priv_data_size = sizeof(AptXContext),
- .init = aptx_init,
- .encode2 = aptx_encode_frame,
- .close = aptx_close,
- .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
- .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
- .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
- .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
- AV_SAMPLE_FMT_NONE },
- .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
-};
-#endif
diff --git a/libavcodec/aptx.h b/libavcodec/aptx.h
new file mode 100644
index 0000000000..ce3d7dc6c1
--- /dev/null
+++ b/libavcodec/aptx.h
@@ -0,0 +1,220 @@
+/*
+ * Audio Processing Technology codec for Bluetooth (aptX)
+ *
+ * Copyright (C) 2017 Aurelien Jacobs <aurel at gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_APTX_H
+#define AVCODEC_APTX_H
+
+#include "libavutil/intreadwrite.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "mathops.h"
+#include "audio_frame_queue.h"
+
+
+enum channels {
+ LEFT,
+ RIGHT,
+ NB_CHANNELS
+};
+
+enum subbands {
+ LF, // Low Frequency (0-5.5 kHz)
+ MLF, // Medium-Low Frequency (5.5-11kHz)
+ MHF, // Medium-High Frequency (11-16.5kHz)
+ HF, // High Frequency (16.5-22kHz)
+ NB_SUBBANDS
+};
+
+#define NB_FILTERS 2
+#define FILTER_TAPS 16
+
+typedef struct {
+ int pos;
+ int32_t buffer[2*FILTER_TAPS];
+} FilterSignal;
+
+typedef struct {
+ FilterSignal outer_filter_signal[NB_FILTERS];
+ FilterSignal inner_filter_signal[NB_FILTERS][NB_FILTERS];
+} QMFAnalysis;
+
+typedef struct {
+ int32_t quantized_sample;
+ int32_t quantized_sample_parity_change;
+ int32_t error;
+} Quantize;
+
+typedef struct {
+ int32_t quantization_factor;
+ int32_t factor_select;
+ int32_t reconstructed_difference;
+} InvertQuantize;
+
+typedef struct {
+ int32_t prev_sign[2];
+ int32_t s_weight[2];
+ int32_t d_weight[24];
+ int32_t pos;
+ int32_t reconstructed_differences[48];
+ int32_t previous_reconstructed_sample;
+ int32_t predicted_difference;
+ int32_t predicted_sample;
+} Prediction;
+
+typedef struct {
+ int32_t codeword_history;
+ int32_t dither_parity;
+ int32_t dither[NB_SUBBANDS];
+
+ QMFAnalysis qmf;
+ Quantize quantize[NB_SUBBANDS];
+ InvertQuantize invert_quantize[NB_SUBBANDS];
+ Prediction prediction[NB_SUBBANDS];
+} Channel;
+
+typedef struct {
+ int hd;
+ int block_size;
+ int32_t sync_idx;
+ Channel channels[NB_CHANNELS];
+ AudioFrameQueue afq;
+} AptXContext;
+
+typedef const struct {
+ const int32_t *quantize_intervals;
+ const int32_t *invert_quantize_dither_factors;
+ const int32_t *quantize_dither_factors;
+ const int16_t *quantize_factor_select_offset;
+ int tables_size;
+ int32_t factor_max;
+ int32_t prediction_order;
+} ConstTables;
+
+extern ConstTables ff_aptx_quant_tables[2][NB_SUBBANDS];
+
+/* Rounded right shift with optionnal clipping */
+#define RSHIFT_SIZE(size) \
+av_always_inline \
+static int##size##_t rshift##size(int##size##_t value, int shift) \
+{ \
+ int##size##_t rounding = (int##size##_t)1 << (shift - 1); \
+ int##size##_t mask = ((int##size##_t)1 << (shift + 1)) - 1; \
+ return ((value + rounding) >> shift) - ((value & mask) == rounding); \
+} \
+av_always_inline \
+static int##size##_t rshift##size##_clip24(int##size##_t value, int shift) \
+{ \
+ return av_clip_intp2(rshift##size(value, shift), 23); \
+}
+RSHIFT_SIZE(32)
+RSHIFT_SIZE(64)
+
+/*
+ * Convolution filter coefficients for the outer QMF of the QMF tree.
+ * The 2 sets are a mirror of each other.
+ */
+static const int32_t aptx_qmf_outer_coeffs[NB_FILTERS][FILTER_TAPS] = {
+ {
+ 730, -413, -9611, 43626, -121026, 269973, -585547, 2801966,
+ 697128, -160481, 27611, 8478, -10043, 3511, 688, -897,
+ },
+ {
+ -897, 688, 3511, -10043, 8478, 27611, -160481, 697128,
+ 2801966, -585547, 269973, -121026, 43626, -9611, -413, 730,
+ },
+};
+
+/*
+ * Convolution filter coefficients for the inner QMF of the QMF tree.
+ * The 2 sets are a mirror of each other.
+ */
+static const int32_t aptx_qmf_inner_coeffs[NB_FILTERS][FILTER_TAPS] = {
+ {
+ 1033, -584, -13592, 61697, -171156, 381799, -828088, 3962579,
+ 985888, -226954, 39048, 11990, -14203, 4966, 973, -1268,
+ },
+ {
+ -1268, 973, 4966, -14203, 11990, 39048, -226954, 985888,
+ 3962579, -828088, 381799, -171156, 61697, -13592, -584, 1033,
+ },
+};
+
+/*
+ * Push one sample into a circular signal buffer.
+ */
+av_always_inline
+static void aptx_qmf_filter_signal_push(FilterSignal *signal, int32_t sample)
+{
+ signal->buffer[signal->pos ] = sample;
+ signal->buffer[signal->pos+FILTER_TAPS] = sample;
+ signal->pos = (signal->pos + 1) & (FILTER_TAPS - 1);
+}
+
+/*
+ * Compute the convolution of the signal with the coefficients, and reduce
+ * to 24 bits by applying the specified right shifting.
+ */
+av_always_inline
+static int32_t aptx_qmf_convolution(FilterSignal *signal,
+ const int32_t coeffs[FILTER_TAPS],
+ int shift)
+{
+ int32_t *sig = &signal->buffer[signal->pos];
+ int64_t e = 0;
+ int i;
+
+ for (i = 0; i < FILTER_TAPS; i++)
+ e += MUL64(sig[i], coeffs[i]);
+
+ return rshift64_clip24(e, shift);
+}
+
+static inline int32_t aptx_quantized_parity(Channel *channel)
+{
+ int32_t parity = channel->dither_parity;
+ int subband;
+
+ for (subband = 0; subband < NB_SUBBANDS; subband++)
+ parity ^= channel->quantize[subband].quantized_sample;
+
+ return parity & 1;
+}
+
+/* For each sample, ensure that the parity of all subbands of all channels
+ * is 0 except once every 8 samples where the parity is forced to 1. */
+static inline int aptx_check_parity(Channel channels[NB_CHANNELS], int32_t *idx)
+{
+ int32_t parity = aptx_quantized_parity(&channels[LEFT])
+ ^ aptx_quantized_parity(&channels[RIGHT]);
+
+ int eighth = *idx == 7;
+ *idx = (*idx + 1) & 7;
+
+ return parity ^ eighth;
+}
+
+void ff_aptx_invert_quantize_and_prediction(Channel *channel, int hd);
+void ff_aptx_generate_dither(Channel *channel);
+
+int ff_aptx_init(AVCodecContext *avctx);
+
+#endif /* AVCODEC_APTX_H */
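The RSHIFT_SIZE() helper that moved into the header above is easy to misread: it is not a plain arithmetic shift but a rounded one, with ties going to the even result. A small standalone C program (demo names are hypothetical, the arithmetic is the same as the macro's) showing the behaviour for shift = 3:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as rshift32() generated by RSHIFT_SIZE(32). */
    static int32_t rshift32_demo(int32_t value, int shift)
    {
        int32_t rounding = (int32_t)1 << (shift - 1);
        int32_t mask     = ((int32_t)1 << (shift + 1)) - 1;
        return ((value + rounding) >> shift) - ((value & mask) == rounding);
    }

    int main(void)
    {
        printf("%d\n", rshift32_demo(20, 3)); /* 20/8 = 2.5, tie -> even: 2 */
        printf("%d\n", rshift32_demo(28, 3)); /* 28/8 = 3.5, tie -> even: 4 */
        printf("%d\n", rshift32_demo(21, 3)); /* 21/8 = 2.625       ->   3 */
        return 0;
    }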
diff --git a/libavcodec/aptxdec.c b/libavcodec/aptxdec.c
new file mode 100644
index 0000000000..3bbf0104df
--- /dev/null
+++ b/libavcodec/aptxdec.c
@@ -0,0 +1,204 @@
+/*
+ * Audio Processing Technology codec for Bluetooth (aptX)
+ *
+ * Copyright (C) 2017 Aurelien Jacobs <aurel at gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "aptx.h"
+
+/*
+ * Half-band QMF synthesis filter realized with a polyphase FIR filter.
+ * Join 2 subbands and upsample by 2.
+ * So for each 2 subbands sample that goes in, a pair of samples goes out.
+ */
+av_always_inline
+static void aptx_qmf_polyphase_synthesis(FilterSignal signal[NB_FILTERS],
+ const int32_t coeffs[NB_FILTERS][FILTER_TAPS],
+ int shift,
+ int32_t low_subband_input,
+ int32_t high_subband_input,
+ int32_t samples[NB_FILTERS])
+{
+ int32_t subbands[NB_FILTERS];
+ int i;
+
+ subbands[0] = low_subband_input + high_subband_input;
+ subbands[1] = low_subband_input - high_subband_input;
+
+ for (i = 0; i < NB_FILTERS; i++) {
+ aptx_qmf_filter_signal_push(&signal[i], subbands[1-i]);
+ samples[i] = aptx_qmf_convolution(&signal[i], coeffs[i], shift);
+ }
+}
+
+/*
+ * Two stage QMF synthesis tree.
+ * Join 4 subbands and upsample by 4.
+ * So for each 4 subbands sample that goes in, a group of 4 samples goes out.
+ */
+static void aptx_qmf_tree_synthesis(QMFAnalysis *qmf,
+ int32_t subband_samples[4],
+ int32_t samples[4])
+{
+ int32_t intermediate_samples[4];
+ int i;
+
+ /* Join 4 subbands into 2 intermediate subbands upsampled to 2 samples. */
+ for (i = 0; i < 2; i++)
+ aptx_qmf_polyphase_synthesis(qmf->inner_filter_signal[i],
+ aptx_qmf_inner_coeffs, 22,
+ subband_samples[2*i+0],
+ subband_samples[2*i+1],
+ &intermediate_samples[2*i]);
+
+ /* Join 2 samples from intermediate subbands upsampled to 4 samples. */
+ for (i = 0; i < 2; i++)
+ aptx_qmf_polyphase_synthesis(qmf->outer_filter_signal,
+ aptx_qmf_outer_coeffs, 21,
+ intermediate_samples[0+i],
+ intermediate_samples[2+i],
+ &samples[2*i]);
+}
+
+
+static void aptx_decode_channel(Channel *channel, int32_t samples[4])
+{
+ int32_t subband_samples[4];
+ int subband;
+ for (subband = 0; subband < NB_SUBBANDS; subband++)
+ subband_samples[subband] = channel->prediction[subband].previous_reconstructed_sample;
+ aptx_qmf_tree_synthesis(&channel->qmf, subband_samples, samples);
+}
+
+static void aptx_unpack_codeword(Channel *channel, uint16_t codeword)
+{
+ channel->quantize[0].quantized_sample = sign_extend(codeword >> 0, 7);
+ channel->quantize[1].quantized_sample = sign_extend(codeword >> 7, 4);
+ channel->quantize[2].quantized_sample = sign_extend(codeword >> 11, 2);
+ channel->quantize[3].quantized_sample = sign_extend(codeword >> 13, 3);
+ channel->quantize[3].quantized_sample = (channel->quantize[3].quantized_sample & ~1)
+ | aptx_quantized_parity(channel);
+}
+
+static void aptxhd_unpack_codeword(Channel *channel, uint32_t codeword)
+{
+ channel->quantize[0].quantized_sample = sign_extend(codeword >> 0, 9);
+ channel->quantize[1].quantized_sample = sign_extend(codeword >> 9, 6);
+ channel->quantize[2].quantized_sample = sign_extend(codeword >> 15, 4);
+ channel->quantize[3].quantized_sample = sign_extend(codeword >> 19, 5);
+ channel->quantize[3].quantized_sample = (channel->quantize[3].quantized_sample & ~1)
+ | aptx_quantized_parity(channel);
+}
+
+static int aptx_decode_samples(AptXContext *ctx,
+ const uint8_t *input,
+ int32_t samples[NB_CHANNELS][4])
+{
+ int channel, ret;
+
+ for (channel = 0; channel < NB_CHANNELS; channel++) {
+ ff_aptx_generate_dither(&ctx->channels[channel]);
+
+ if (ctx->hd)
+ aptxhd_unpack_codeword(&ctx->channels[channel],
+ AV_RB24(input + 3*channel));
+ else
+ aptx_unpack_codeword(&ctx->channels[channel],
+ AV_RB16(input + 2*channel));
+ ff_aptx_invert_quantize_and_prediction(&ctx->channels[channel], ctx->hd);
+ }
+
+ ret = aptx_check_parity(ctx->channels, &ctx->sync_idx);
+
+ for (channel = 0; channel < NB_CHANNELS; channel++)
+ aptx_decode_channel(&ctx->channels[channel], samples[channel]);
+
+ return ret;
+}
+
+static int aptx_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ AptXContext *s = avctx->priv_data;
+ AVFrame *frame = data;
+ int pos, opos, channel, sample, ret;
+
+ if (avpkt->size < s->block_size) {
+ av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* get output buffer */
+ frame->channels = NB_CHANNELS;
+ frame->format = AV_SAMPLE_FMT_S32P;
+ frame->nb_samples = 4 * avpkt->size / s->block_size;
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+ return ret;
+
+ for (pos = 0, opos = 0; opos < frame->nb_samples; pos += s->block_size, opos += 4) {
+ int32_t samples[NB_CHANNELS][4];
+
+ if (aptx_decode_samples(s, &avpkt->data[pos], samples)) {
+ av_log(avctx, AV_LOG_ERROR, "Synchronization error\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (channel = 0; channel < NB_CHANNELS; channel++)
+ for (sample = 0; sample < 4; sample++)
+ AV_WN32A(&frame->data[channel][4*(opos+sample)],
+ samples[channel][sample] * 256);
+ }
+
+ *got_frame_ptr = 1;
+ return s->block_size * frame->nb_samples / 4;
+}
+
+#if CONFIG_APTX_DECODER
+AVCodec ff_aptx_decoder = {
+ .name = "aptx",
+ .long_name = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_APTX,
+ .priv_data_size = sizeof(AptXContext),
+ .init = ff_aptx_init,
+ .decode = aptx_decode_frame,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
+ .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_NONE },
+};
+#endif
+
+#if CONFIG_APTX_HD_DECODER
+AVCodec ff_aptx_hd_decoder = {
+ .name = "aptx_hd",
+ .long_name = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_APTX_HD,
+ .priv_data_size = sizeof(AptXContext),
+ .init = ff_aptx_init,
+ .decode = aptx_decode_frame,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
+ .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_NONE },
+};
+#endif
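One detail shared between the two new files: aptx_decode_frame() above writes 24-bit samples scaled into the top bits of the S32P output (value * 256), and aptx_encode_frame() in aptxenc.c below undoes that with an arithmetic right shift by 8. A trivial standalone check of that round trip (variable names are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t sample24 = -123456;        /* any signed 24-bit sample value */
        int32_t s32p     = sample24 * 256; /* decoder output scaling         */
        int32_t back     = s32p >> 8;      /* encoder input scaling; assumes
                                            * arithmetic >>, as the code does */
        printf("%d -> %d -> %d\n", sample24, s32p, back);
        return 0;
    }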
diff --git a/libavcodec/aptxenc.c b/libavcodec/aptxenc.c
new file mode 100644
index 0000000000..60de73ec28
--- /dev/null
+++ b/libavcodec/aptxenc.c
@@ -0,0 +1,278 @@
+/*
+ * Audio Processing Technology codec for Bluetooth (aptX)
+ *
+ * Copyright (C) 2017 Aurelien Jacobs <aurel at gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "aptx.h"
+
+/*
+ * Half-band QMF analysis filter realized with a polyphase FIR filter.
+ * Split into 2 subbands and downsample by 2.
+ * So for each pair of samples that goes in, one sample goes out,
+ * split into 2 separate subbands.
+ */
+av_always_inline
+static void aptx_qmf_polyphase_analysis(FilterSignal signal[NB_FILTERS],
+ const int32_t coeffs[NB_FILTERS][FILTER_TAPS],
+ int shift,
+ int32_t samples[NB_FILTERS],
+ int32_t *low_subband_output,
+ int32_t *high_subband_output)
+{
+ int32_t subbands[NB_FILTERS];
+ int i;
+
+ for (i = 0; i < NB_FILTERS; i++) {
+ aptx_qmf_filter_signal_push(&signal[i], samples[NB_FILTERS-1-i]);
+ subbands[i] = aptx_qmf_convolution(&signal[i], coeffs[i], shift);
+ }
+
+ *low_subband_output = av_clip_intp2(subbands[0] + subbands[1], 23);
+ *high_subband_output = av_clip_intp2(subbands[0] - subbands[1], 23);
+}
+
+/*
+ * Two stage QMF analysis tree.
+ * Split 4 input samples into 4 subbands and downsample by 4.
+ * So for each group of 4 samples that goes in, one sample goes out,
+ * split into 4 separate subbands.
+ */
+static void aptx_qmf_tree_analysis(QMFAnalysis *qmf,
+ int32_t samples[4],
+ int32_t subband_samples[4])
+{
+ int32_t intermediate_samples[4];
+ int i;
+
+ /* Split 4 input samples into 2 intermediate subbands downsampled to 2 samples */
+ for (i = 0; i < 2; i++)
+ aptx_qmf_polyphase_analysis(qmf->outer_filter_signal,
+ aptx_qmf_outer_coeffs, 23,
+ &samples[2*i],
+ &intermediate_samples[0+i],
+ &intermediate_samples[2+i]);
+
+ /* Split 2 intermediate subband samples into 4 final subbands downsampled to 1 sample */
+ for (i = 0; i < 2; i++)
+ aptx_qmf_polyphase_analysis(qmf->inner_filter_signal[i],
+ aptx_qmf_inner_coeffs, 23,
+ &intermediate_samples[2*i],
+ &subband_samples[2*i+0],
+ &subband_samples[2*i+1]);
+}
+
+av_always_inline
+static int32_t aptx_bin_search(int32_t value, int32_t factor,
+ const int32_t *intervals, int32_t nb_intervals)
+{
+ int32_t idx = 0;
+ int i;
+
+ for (i = nb_intervals >> 1; i > 0; i >>= 1)
+ if (MUL64(factor, intervals[idx + i]) <= ((int64_t)value << 24))
+ idx += i;
+
+ return idx;
+}
+
+static void aptx_quantize_difference(Quantize *quantize,
+ int32_t sample_difference,
+ int32_t dither,
+ int32_t quantization_factor,
+ ConstTables *tables)
+{
+ const int32_t *intervals = tables->quantize_intervals;
+ int32_t quantized_sample, dithered_sample, parity_change;
+ int32_t d, mean, interval, inv, sample_difference_abs;
+ int64_t error;
+
+ sample_difference_abs = FFABS(sample_difference);
+ sample_difference_abs = FFMIN(sample_difference_abs, (1 << 23) - 1);
+
+ quantized_sample = aptx_bin_search(sample_difference_abs >> 4,
+ quantization_factor,
+ intervals, tables->tables_size);
+
+ d = rshift32_clip24(MULH(dither, dither), 7) - (1 << 23);
+ d = rshift64(MUL64(d, tables->quantize_dither_factors[quantized_sample]), 23);
+
+ intervals += quantized_sample;
+ mean = (intervals[1] + intervals[0]) / 2;
+ interval = (intervals[1] - intervals[0]) * (-(sample_difference < 0) | 1);
+
+ dithered_sample = rshift64_clip24(MUL64(dither, interval) + ((int64_t)av_clip_intp2(mean + d, 23) << 32), 32);
+ error = ((int64_t)sample_difference_abs << 20) - MUL64(dithered_sample, quantization_factor);
+ quantize->error = FFABS(rshift64(error, 23));
+
+ parity_change = quantized_sample;
+ if (error < 0)
+ quantized_sample--;
+ else
+ parity_change--;
+
+ inv = -(sample_difference < 0);
+ quantize->quantized_sample = quantized_sample ^ inv;
+ quantize->quantized_sample_parity_change = parity_change ^ inv;
+}
+
+static void aptx_encode_channel(Channel *channel, int32_t samples[4], int hd)
+{
+ int32_t subband_samples[4];
+ int subband;
+ aptx_qmf_tree_analysis(&channel->qmf, samples, subband_samples);
+ ff_aptx_generate_dither(channel);
+ for (subband = 0; subband < NB_SUBBANDS; subband++) {
+ int32_t diff = av_clip_intp2(subband_samples[subband] - channel->prediction[subband].predicted_sample, 23);
+ aptx_quantize_difference(&channel->quantize[subband], diff,
+ channel->dither[subband],
+ channel->invert_quantize[subband].quantization_factor,
+ &ff_aptx_quant_tables[hd][subband]);
+ }
+}
+
+static void aptx_insert_sync(Channel channels[NB_CHANNELS], int32_t *idx)
+{
+ if (aptx_check_parity(channels, idx)) {
+ int i;
+ Channel *c;
+ static const int map[] = { 1, 2, 0, 3 };
+ Quantize *min = &channels[NB_CHANNELS-1].quantize[map[0]];
+ for (c = &channels[NB_CHANNELS-1]; c >= channels; c--)
+ for (i = 0; i < NB_SUBBANDS; i++)
+ if (c->quantize[map[i]].error < min->error)
+ min = &c->quantize[map[i]];
+
+ /* Forcing the desired parity is done by offsetting by 1 the quantized
+ * sample from the subband featuring the smallest quantization error. */
+ min->quantized_sample = min->quantized_sample_parity_change;
+ }
+}
+
+static uint16_t aptx_pack_codeword(Channel *channel)
+{
+ int32_t parity = aptx_quantized_parity(channel);
+ return (((channel->quantize[3].quantized_sample & 0x06) | parity) << 13)
+ | (((channel->quantize[2].quantized_sample & 0x03) ) << 11)
+ | (((channel->quantize[1].quantized_sample & 0x0F) ) << 7)
+ | (((channel->quantize[0].quantized_sample & 0x7F) ) << 0);
+}
+
+static uint32_t aptxhd_pack_codeword(Channel *channel)
+{
+ int32_t parity = aptx_quantized_parity(channel);
+ return (((channel->quantize[3].quantized_sample & 0x01E) | parity) << 19)
+ | (((channel->quantize[2].quantized_sample & 0x00F) ) << 15)
+ | (((channel->quantize[1].quantized_sample & 0x03F) ) << 9)
+ | (((channel->quantize[0].quantized_sample & 0x1FF) ) << 0);
+}
+
+static void aptx_encode_samples(AptXContext *ctx,
+ int32_t samples[NB_CHANNELS][4],
+ uint8_t *output)
+{
+ int channel;
+ for (channel = 0; channel < NB_CHANNELS; channel++)
+ aptx_encode_channel(&ctx->channels[channel], samples[channel], ctx->hd);
+
+ aptx_insert_sync(ctx->channels, &ctx->sync_idx);
+
+ for (channel = 0; channel < NB_CHANNELS; channel++) {
+ ff_aptx_invert_quantize_and_prediction(&ctx->channels[channel], ctx->hd);
+ if (ctx->hd)
+ AV_WB24(output + 3*channel,
+ aptxhd_pack_codeword(&ctx->channels[channel]));
+ else
+ AV_WB16(output + 2*channel,
+ aptx_pack_codeword(&ctx->channels[channel]));
+ }
+}
+
+static int aptx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ AptXContext *s = avctx->priv_data;
+ int pos, ipos, channel, sample, output_size, ret;
+
+ if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
+ return ret;
+
+ output_size = s->block_size * frame->nb_samples/4;
+ if ((ret = ff_alloc_packet2(avctx, avpkt, output_size, 0)) < 0)
+ return ret;
+
+ for (pos = 0, ipos = 0; pos < output_size; pos += s->block_size, ipos += 4) {
+ int32_t samples[NB_CHANNELS][4];
+
+ for (channel = 0; channel < NB_CHANNELS; channel++)
+ for (sample = 0; sample < 4; sample++)
+ samples[channel][sample] = (int32_t)AV_RN32A(&frame->data[channel][4*(ipos+sample)]) >> 8;
+
+ aptx_encode_samples(s, samples, avpkt->data + pos);
+ }
+
+ ff_af_queue_remove(&s->afq, frame->nb_samples, &avpkt->pts, &avpkt->duration);
+ *got_packet_ptr = 1;
+ return 0;
+}
+
+static av_cold int aptx_close(AVCodecContext *avctx)
+{
+ AptXContext *s = avctx->priv_data;
+ ff_af_queue_close(&s->afq);
+ return 0;
+}
+
+#if CONFIG_APTX_ENCODER
+AVCodec ff_aptx_encoder = {
+ .name = "aptx",
+ .long_name = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_APTX,
+ .priv_data_size = sizeof(AptXContext),
+ .init = ff_aptx_init,
+ .encode2 = aptx_encode_frame,
+ .close = aptx_close,
+ .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
+ .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_NONE },
+ .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
+};
+#endif
+
+#if CONFIG_APTX_HD_ENCODER
+AVCodec ff_aptx_hd_encoder = {
+ .name = "aptx_hd",
+ .long_name = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_APTX_HD,
+ .priv_data_size = sizeof(AptXContext),
+ .init = ff_aptx_init,
+ .encode2 = aptx_encode_frame,
+ .close = aptx_close,
+ .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
+ .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_STEREO, 0},
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
+ AV_SAMPLE_FMT_NONE },
+ .supported_samplerates = (const int[]) {8000, 16000, 24000, 32000, 44100, 48000, 0},
+};
+#endif
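Finally, a note on the parity machinery that ended up split across the header (aptx_check_parity) and the encoder (aptx_insert_sync): the combined quantized parity of both channels must be 0, except on every 8th block where it is forced to 1, and the decoder uses that pattern to detect loss of synchronization. A minimal standalone simulation of the rule (hypothetical helper name, same logic as aptx_check_parity):

    #include <stdio.h>

    /* Same logic as aptx_check_parity(): returns non-zero on a sync error. */
    static int check_parity(int combined_parity, int *idx)
    {
        int eighth = *idx == 7;   /* every 8th block expects parity 1 */
        *idx = (*idx + 1) & 7;
        return combined_parity ^ eighth;
    }

    int main(void)
    {
        int idx = 0;
        for (int block = 0; block < 16; block++) {
            int required = (block % 8 == 7);  /* parity the encoder must force */
            printf("block %2d: required parity %d, error flag %d\n",
                   block, required, check_parity(required, &idx));
        }
        return 0;
    }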