Updated FFMPEG to version 1.1.2, using this project: http://sourceforge.net/projects/ffmpeg4android/

This commit is contained in:
Sergii Pylypenko
2013-02-21 18:29:51 +02:00
parent 758a9658d2
commit fff7a99a41
3492 changed files with 886704 additions and 5414 deletions

View File

@@ -0,0 +1,16 @@
# MIPS optimization object lists for libavcodec.
# MIPSFPU-OBJS-* entries require the MIPS floating-point unit;
# MIPSDSPR1-OBJS-* entries require the MIPS DSP ASE revision 1.
MIPSFPU-OBJS-$(CONFIG_AMRNB_DECODER) += mips/acelp_filters_mips.o \
mips/celp_filters_mips.o \
mips/celp_math_mips.o \
mips/acelp_vectors_mips.o
# AMR-WB shares the ACELP/CELP helpers and adds its own decoder routines.
MIPSFPU-OBJS-$(CONFIG_AMRWB_DECODER) += mips/acelp_filters_mips.o \
mips/celp_filters_mips.o \
mips/amrwbdec_mips.o \
mips/celp_math_mips.o \
mips/acelp_vectors_mips.o
# MPEG audio DSP: float variant needs the FPU, fixed-point variant needs DSPr1.
MIPSFPU-OBJS-$(CONFIG_MPEGAUDIODSP) += mips/mpegaudiodsp_mips_float.o
MIPSDSPR1-OBJS-$(CONFIG_MPEGAUDIODSP) += mips/mpegaudiodsp_mips_fixed.o
# FFT: table init is plain C (always built with FFT), butterflies need the FPU.
OBJS-$(CONFIG_FFT) += mips/fft_init_table.o
MIPSFPU-OBJS-$(CONFIG_FFT) += mips/fft_mips.o
MIPSFPU-OBJS += mips/dsputil_mips.o \
mips/fmtconvert_mips.o
OBJS-$(CONFIG_AC3DSP) += mips/ac3dsp_mips.o

View File

@@ -0,0 +1,412 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Authors: Branimir Vasic (bvasic@mips.com)
* Nedeljko Babic (nbabic@mips.com)
*
* Various AC-3 DSP Utils optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/ac3dsp.c
*/
#include "config.h"
#include "libavcodec/ac3dsp.h"
#include "libavcodec/ac3.h"
#if HAVE_INLINE_ASM
#if HAVE_MIPSDSPR1
/**
 * Compute bit-allocation pointers (bap) for coefficient bins [start, end)
 * (MIPS DSP ASE r1 version; see libavcodec/ac3dsp.c for the reference C).
 *
 * For each band, m = masked level clamped to >= floor; each bin's
 * (psd - m) >> 5 is saturated into [-32, 31] via the DSPr1 shll_s.w/sra
 * pair, rebased to [0, 63] and used as an index into bap_tab (lbux).
 *
 * @param mask       per-band masking curve values
 * @param psd        per-bin power spectral density values
 * @param start      first bin (inclusive)
 * @param end        last bin (exclusive)
 * @param snr_offset SNR offset; -960 means "allocate nothing"
 * @param floor      bit-allocation floor
 * @param bap_tab    lookup table mapping the 6-bit address to a bap value
 * @param bap        output bit-allocation pointers, one byte per bin
 */
static void ac3_bit_alloc_calc_bap_mips(int16_t *mask, int16_t *psd,
                                        int start, int end,
                                        int snr_offset, int floor,
                                        const uint8_t *bap_tab, uint8_t *bap)
{
    int band, band_end, cond;
    int m, address1, address2;
    int16_t *psd1, *psd_end;
    uint8_t *bap1;

    /* Special case: snr_offset == -960 zeroes out all bap values. */
    /* NOTE(review): relies on memset/AC3_MAX_COEFS — presumably provided via
     * the included ac3 headers; confirm <string.h> reaches here. */
    if (snr_offset == -960) {
        memset(bap, 0, AC3_MAX_COEFS);
        return;
    }

    psd1 = &psd[start];
    bap1 = &bap[start];
    band = ff_ac3_bin_to_band_tab[start];

    do {
        /* Masked level for this band, kept on a 32-step grid (& 0x1FE0),
         * never below the floor. */
        m = (FFMAX(mask[band] - snr_offset - floor, 0) & 0x1FE0) + floor;
        band_end = ff_ac3_band_start_tab[++band];
        band_end = FFMIN(band_end, end);
        /* psd_end points one short *before* the last bin so the unrolled
         * loop (label 2) handles pairs and label 1 handles a leftover bin. */
        psd_end = psd + band_end - 1;

        __asm__ volatile (
            "slt %[cond], %[psd1], %[psd_end] \n\t"
            "beqz %[cond], 1f \n\t"
            /* main loop: two bins per iteration */
            "2: \n\t"
            "lh %[address1], 0(%[psd1]) \n\t"
            "lh %[address2], 2(%[psd1]) \n\t"
            "addiu %[psd1], %[psd1], 4 \n\t"
            "subu %[address1], %[address1], %[m] \n\t"
            "sra %[address1], %[address1], 5 \n\t"
            "addiu %[address1], %[address1], -32 \n\t"
            /* shll_s.w by 26 then sra 26: saturate to signed 6 bits */
            "shll_s.w %[address1], %[address1], 26 \n\t"
            "subu %[address2], %[address2], %[m] \n\t"
            "sra %[address2], %[address2], 5 \n\t"
            "sra %[address1], %[address1], 26 \n\t"
            "addiu %[address1], %[address1], 32 \n\t"
            "lbux %[address1], %[address1](%[bap_tab]) \n\t"
            "addiu %[address2], %[address2], -32 \n\t"
            "shll_s.w %[address2], %[address2], 26 \n\t"
            "sb %[address1], 0(%[bap1]) \n\t"
            "slt %[cond], %[psd1], %[psd_end] \n\t"
            "sra %[address2], %[address2], 26 \n\t"
            "addiu %[address2], %[address2], 32 \n\t"
            "lbux %[address2], %[address2](%[bap_tab]) \n\t"
            "sb %[address2], 1(%[bap1]) \n\t"
            "addiu %[bap1], %[bap1], 2 \n\t"
            "bnez %[cond], 2b \n\t"
            "addiu %[psd_end], %[psd_end], 2 \n\t"
            "slt %[cond], %[psd1], %[psd_end] \n\t"
            "beqz %[cond], 3f \n\t"
            /* tail: single remaining bin, if any */
            "1: \n\t"
            "lh %[address1], 0(%[psd1]) \n\t"
            "addiu %[psd1], %[psd1], 2 \n\t"
            "subu %[address1], %[address1], %[m] \n\t"
            "sra %[address1], %[address1], 5 \n\t"
            "addiu %[address1], %[address1], -32 \n\t"
            "shll_s.w %[address1], %[address1], 26 \n\t"
            "sra %[address1], %[address1], 26 \n\t"
            "addiu %[address1], %[address1], 32 \n\t"
            "lbux %[address1], %[address1](%[bap_tab]) \n\t"
            "sb %[address1], 0(%[bap1]) \n\t"
            "addiu %[bap1], %[bap1], 1 \n\t"
            "3: \n\t"
            : [address1]"=&r"(address1), [address2]"=&r"(address2),
              [cond]"=&r"(cond), [bap1]"+r"(bap1),
              [psd1]"+r"(psd1), [psd_end]"+r"(psd_end)
            : [m]"r"(m), [bap_tab]"r"(bap_tab)
            : "memory"
        );
    } while (end > band_end);
}
/**
 * Increment mant_cnt[bap[i]] for each of the len entries of bap
 * (MIPS version of the mantissa bit-count update in libavcodec/ac3dsp.c).
 *
 * The asm walks bap backwards from bap[len - 1]: the main loop (label 1)
 * handles four bytes per pass down to the len % 4 remainder, which the
 * tail loop (label 3) then counts one byte at a time. Each bap value is
 * doubled (sll by 1) to index the uint16_t mant_cnt array by bytes.
 *
 * @param mant_cnt 16-entry histogram of bap values, updated in place
 * @param bap      bit-allocation pointer values (bytes)
 * @param len      number of bap entries to count
 */
static void ac3_update_bap_counts_mips(uint16_t mant_cnt[16], uint8_t *bap,
                                       int len)
{
    int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

    __asm__ volatile (
        /* temp3 = len % 4; temp2 = &bap[len]; temp4 = &bap[len % 4] */
        "andi %[temp3], %[len], 3 \n\t"
        "addu %[temp2], %[bap], %[len] \n\t"
        "addu %[temp4], %[bap], %[temp3] \n\t"
        "beq %[temp2], %[temp4], 4f \n\t"
        /* main loop: four histogram increments per iteration */
        "1: \n\t"
        "lbu %[temp0], -1(%[temp2]) \n\t"
        "lbu %[temp5], -2(%[temp2]) \n\t"
        "lbu %[temp6], -3(%[temp2]) \n\t"
        "sll %[temp0], %[temp0], 1 \n\t"
        "addu %[temp0], %[mant_cnt], %[temp0] \n\t"
        "sll %[temp5], %[temp5], 1 \n\t"
        "addu %[temp5], %[mant_cnt], %[temp5] \n\t"
        "lhu %[temp1], 0(%[temp0]) \n\t"
        "sll %[temp6], %[temp6], 1 \n\t"
        "addu %[temp6], %[mant_cnt], %[temp6] \n\t"
        "addiu %[temp1], %[temp1], 1 \n\t"
        "sh %[temp1], 0(%[temp0]) \n\t"
        "lhu %[temp1], 0(%[temp5]) \n\t"
        "lbu %[temp7], -4(%[temp2]) \n\t"
        "addiu %[temp2], %[temp2], -4 \n\t"
        "addiu %[temp1], %[temp1], 1 \n\t"
        "sh %[temp1], 0(%[temp5]) \n\t"
        "lhu %[temp1], 0(%[temp6]) \n\t"
        "sll %[temp7], %[temp7], 1 \n\t"
        "addu %[temp7], %[mant_cnt], %[temp7] \n\t"
        "addiu %[temp1], %[temp1],1 \n\t"
        "sh %[temp1], 0(%[temp6]) \n\t"
        "lhu %[temp1], 0(%[temp7]) \n\t"
        "addiu %[temp1], %[temp1], 1 \n\t"
        "sh %[temp1], 0(%[temp7]) \n\t"
        "bne %[temp2], %[temp4], 1b \n\t"
        /* tail loop: remaining len % 4 entries, one at a time */
        "4: \n\t"
        "beqz %[temp3], 2f \n\t"
        "3: \n\t"
        "addiu %[temp3], %[temp3], -1 \n\t"
        "lbu %[temp0], -1(%[temp2]) \n\t"
        "addiu %[temp2], %[temp2], -1 \n\t"
        "sll %[temp0], %[temp0], 1 \n\t"
        "addu %[temp0], %[mant_cnt], %[temp0] \n\t"
        "lhu %[temp1], 0(%[temp0]) \n\t"
        "addiu %[temp1], %[temp1], 1 \n\t"
        "sh %[temp1], 0(%[temp0]) \n\t"
        "bgtz %[temp3], 3b \n\t"
        "2: \n\t"
        : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1),
          [temp2] "=&r" (temp2), [temp3] "=&r" (temp3),
          [temp4] "=&r" (temp4), [temp5] "=&r" (temp5),
          [temp6] "=&r" (temp6), [temp7] "=&r" (temp7)
        : [len] "r" (len), [bap] "r" (bap),
          [mant_cnt] "r" (mant_cnt)
        : "memory"
    );
}
#endif
#if HAVE_MIPSFPU && HAVE_MIPS32R2
/**
 * Convert float samples to 24-bit fixed point: dst[i] = (int32_t)(src[i] * 2^24)
 * (MIPS32r2 FPU version; reference in libavcodec/ac3dsp.c).
 *
 * Eight samples are converted per asm block (scale by 2^24, cvt.w.s to
 * integer, move to GPRs, store).
 *
 * NOTE(review): the do/while consumes exactly 8 samples per pass with no
 * tail handling, so callers presumably guarantee len is a nonzero multiple
 * of 8 — confirm against the AC3DSPContext contract.
 * NOTE(review): cvt.w.s rounds according to the current FCSR rounding
 * mode — confirm this matches the rounding the reference C expects.
 */
static void float_to_fixed24_mips(int32_t *dst, const float *src, unsigned int len)
{
    const float scale = 1 << 24;  /* 2^24, exactly representable in float */
    float src0, src1, src2, src3, src4, src5, src6, src7;
    int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

    do {
        __asm__ volatile (
            "lwc1 %[src0], 0(%[src]) \n\t"
            "lwc1 %[src1], 4(%[src]) \n\t"
            "lwc1 %[src2], 8(%[src]) \n\t"
            "lwc1 %[src3], 12(%[src]) \n\t"
            "lwc1 %[src4], 16(%[src]) \n\t"
            "lwc1 %[src5], 20(%[src]) \n\t"
            "lwc1 %[src6], 24(%[src]) \n\t"
            "lwc1 %[src7], 28(%[src]) \n\t"
            "mul.s %[src0], %[src0], %[scale] \n\t"
            "mul.s %[src1], %[src1], %[scale] \n\t"
            "mul.s %[src2], %[src2], %[scale] \n\t"
            "mul.s %[src3], %[src3], %[scale] \n\t"
            "mul.s %[src4], %[src4], %[scale] \n\t"
            "mul.s %[src5], %[src5], %[scale] \n\t"
            "mul.s %[src6], %[src6], %[scale] \n\t"
            "mul.s %[src7], %[src7], %[scale] \n\t"
            "cvt.w.s %[src0], %[src0] \n\t"
            "cvt.w.s %[src1], %[src1] \n\t"
            "cvt.w.s %[src2], %[src2] \n\t"
            "cvt.w.s %[src3], %[src3] \n\t"
            "cvt.w.s %[src4], %[src4] \n\t"
            "cvt.w.s %[src5], %[src5] \n\t"
            "cvt.w.s %[src6], %[src6] \n\t"
            "cvt.w.s %[src7], %[src7] \n\t"
            "mfc1 %[temp0], %[src0] \n\t"
            "mfc1 %[temp1], %[src1] \n\t"
            "mfc1 %[temp2], %[src2] \n\t"
            "mfc1 %[temp3], %[src3] \n\t"
            "mfc1 %[temp4], %[src4] \n\t"
            "mfc1 %[temp5], %[src5] \n\t"
            "mfc1 %[temp6], %[src6] \n\t"
            "mfc1 %[temp7], %[src7] \n\t"
            "sw %[temp0], 0(%[dst]) \n\t"
            "sw %[temp1], 4(%[dst]) \n\t"
            "sw %[temp2], 8(%[dst]) \n\t"
            "sw %[temp3], 12(%[dst]) \n\t"
            "sw %[temp4], 16(%[dst]) \n\t"
            "sw %[temp5], 20(%[dst]) \n\t"
            "sw %[temp6], 24(%[dst]) \n\t"
            "sw %[temp7], 28(%[dst]) \n\t"
            : [dst] "+r" (dst), [src] "+r" (src),
              [src0] "=&f" (src0), [src1] "=&f" (src1),
              [src2] "=&f" (src2), [src3] "=&f" (src3),
              [src4] "=&f" (src4), [src5] "=&f" (src5),
              [src6] "=&f" (src6), [src7] "=&f" (src7),
              [temp0] "=r" (temp0), [temp1] "=r" (temp1),
              [temp2] "=r" (temp2), [temp3] "=r" (temp3),
              [temp4] "=r" (temp4), [temp5] "=r" (temp5),
              [temp6] "=r" (temp6), [temp7] "=r" (temp7)
            : [scale] "f" (scale)
            : "memory"
        );
        src = src + 8;
        dst = dst + 8;
        len -= 8;
    } while (len > 0);
}
/**
 * Downmix in_ch input channels into out_ch output channels using the
 * per-channel gains in matrix (MIPS FPU version; reference in
 * libavcodec/ac3dsp.c).
 *
 * Only out_ch == 2 and out_ch == 1 are implemented: the first branch
 * skips to label 3 when out_ch != 2, and label 3 skips to the end
 * (label 6) when out_ch != 1, leaving samples unmodified for any other
 * output channel count. Results are written in place into samples[0]
 * (and samples[1] for stereo).
 *
 * Both paths process four floats (16 bytes) per outer iteration, so len
 * is presumably a multiple of 4 — TODO confirm against callers.
 *
 * @param samples per-channel sample pointers, downmix written in place
 * @param matrix  matrix[j][0..1] = gain of input channel j into output 0/1
 * @param out_ch  number of output channels (1 or 2 handled)
 * @param in_ch   number of input channels
 * @param len     number of samples per channel
 */
static void ac3_downmix_mips(float **samples, float (*matrix)[2],
                             int out_ch, int in_ch, int len)
{
    int i, j, i1, i2, i3;
    float v0, v1, v2, v3;
    float v4, v5, v6, v7;
    float samples0, samples1, samples2, samples3, matrix_j, matrix_j2;
    float *samples_p,*matrix_p, **samples_x, **samples_end, **samples_sw;

    __asm__ volatile(
        ".set push \n\t"
        ".set noreorder \n\t"
        /* len and j are converted to byte offsets (<< 2) */
        "li %[i1], 2 \n\t"
        "sll %[len], 2 \n\t"
        "move %[i], $zero \n\t"
        "sll %[j], %[in_ch], 2 \n\t"
        "bne %[out_ch], %[i1], 3f \n\t" // if (out_ch == 2)
        " li %[i2], 1 \n\t"
        "2: \n\t" // start of the for loop (for (i = 0; i < len; i+=4))
        "move %[matrix_p], %[matrix] \n\t"
        "move %[samples_x], %[samples] \n\t"
        /* v0..v3 accumulate output channel 0, v4..v7 output channel 1 */
        "mtc1 $zero, %[v0] \n\t"
        "mtc1 $zero, %[v1] \n\t"
        "mtc1 $zero, %[v2] \n\t"
        "mtc1 $zero, %[v3] \n\t"
        "mtc1 $zero, %[v4] \n\t"
        "mtc1 $zero, %[v5] \n\t"
        "mtc1 $zero, %[v6] \n\t"
        "mtc1 $zero, %[v7] \n\t"
        "addiu %[i1], %[i], 4 \n\t"
        "addiu %[i2], %[i], 8 \n\t"
        "lw %[samples_p], 0(%[samples_x]) \n\t"
        "addiu %[i3], %[i], 12 \n\t"
        "addu %[samples_end], %[samples_x], %[j] \n\t"
        "move %[samples_sw], %[samples_p] \n\t"
        "1: \n\t" // start of the inner for loop (for (j = 0; j < in_ch; j++))
        "lwc1 %[matrix_j], 0(%[matrix_p]) \n\t"
        "lwc1 %[matrix_j2], 4(%[matrix_p]) \n\t"
        "lwxc1 %[samples0], %[i](%[samples_p]) \n\t"
        "lwxc1 %[samples1], %[i1](%[samples_p]) \n\t"
        "lwxc1 %[samples2], %[i2](%[samples_p]) \n\t"
        "lwxc1 %[samples3], %[i3](%[samples_p]) \n\t"
        "addiu %[matrix_p], 8 \n\t"
        "addiu %[samples_x], 4 \n\t"
        "madd.s %[v0], %[v0], %[samples0], %[matrix_j] \n\t"
        "madd.s %[v1], %[v1], %[samples1], %[matrix_j] \n\t"
        "madd.s %[v2], %[v2], %[samples2], %[matrix_j] \n\t"
        "madd.s %[v3], %[v3], %[samples3], %[matrix_j] \n\t"
        "madd.s %[v4], %[v4], %[samples0], %[matrix_j2]\n\t"
        "madd.s %[v5], %[v5], %[samples1], %[matrix_j2]\n\t"
        "madd.s %[v6], %[v6], %[samples2], %[matrix_j2]\n\t"
        "madd.s %[v7], %[v7], %[samples3], %[matrix_j2]\n\t"
        "bne %[samples_x], %[samples_end], 1b \n\t"
        " lw %[samples_p], 0(%[samples_x]) \n\t"
        /* store 4 samples to output channel 0 and 4 to channel 1 */
        "lw %[samples_p], 4(%[samples]) \n\t"
        "swxc1 %[v0], %[i](%[samples_sw]) \n\t"
        "swxc1 %[v1], %[i1](%[samples_sw]) \n\t"
        "swxc1 %[v2], %[i2](%[samples_sw]) \n\t"
        "swxc1 %[v3], %[i3](%[samples_sw]) \n\t"
        "swxc1 %[v4], %[i](%[samples_p]) \n\t"
        "addiu %[i], 16 \n\t"
        "swxc1 %[v5], %[i1](%[samples_p]) \n\t"
        "swxc1 %[v6], %[i2](%[samples_p]) \n\t"
        "bne %[i], %[len], 2b \n\t"
        " swxc1 %[v7], %[i3](%[samples_p]) \n\t"
        "3: \n\t"
        "bne %[out_ch], %[i2], 6f \n\t" // if (out_ch == 1)
        " nop \n\t"
        "5: \n\t" // start of the outer for loop (for (i = 0; i < len; i+=4))
        "move %[matrix_p], %[matrix] \n\t"
        "move %[samples_x], %[samples] \n\t"
        "mtc1 $zero, %[v0] \n\t"
        "mtc1 $zero, %[v1] \n\t"
        "mtc1 $zero, %[v2] \n\t"
        "mtc1 $zero, %[v3] \n\t"
        "addiu %[i1], %[i], 4 \n\t"
        "addiu %[i2], %[i], 8 \n\t"
        "lw %[samples_p], 0(%[samples_x]) \n\t"
        "addiu %[i3], %[i], 12 \n\t"
        "addu %[samples_end], %[samples_x], %[j] \n\t"
        "move %[samples_sw], %[samples_p] \n\t"
        "4: \n\t" // start of the inner for loop (for (j = 0; j < in_ch; j++))
        "lwc1 %[matrix_j], 0(%[matrix_p]) \n\t"
        "lwxc1 %[samples0], %[i](%[samples_p]) \n\t"
        "lwxc1 %[samples1], %[i1](%[samples_p]) \n\t"
        "lwxc1 %[samples2], %[i2](%[samples_p]) \n\t"
        "lwxc1 %[samples3], %[i3](%[samples_p]) \n\t"
        "addiu %[matrix_p], 8 \n\t"
        "addiu %[samples_x], 4 \n\t"
        "madd.s %[v0], %[v0], %[samples0], %[matrix_j] \n\t"
        "madd.s %[v1], %[v1], %[samples1], %[matrix_j] \n\t"
        "madd.s %[v2], %[v2], %[samples2], %[matrix_j] \n\t"
        "madd.s %[v3], %[v3], %[samples3], %[matrix_j] \n\t"
        "bne %[samples_x], %[samples_end], 4b \n\t"
        " lw %[samples_p], 0(%[samples_x]) \n\t"
        "swxc1 %[v0], %[i](%[samples_sw]) \n\t"
        "addiu %[i], 16 \n\t"
        "swxc1 %[v1], %[i1](%[samples_sw]) \n\t"
        "swxc1 %[v2], %[i2](%[samples_sw]) \n\t"
        "bne %[i], %[len], 5b \n\t"
        " swxc1 %[v3], %[i3](%[samples_sw]) \n\t"
        "6: \n\t"
        ".set pop"
        :[samples_p]"=&r"(samples_p), [matrix_j]"=&f"(matrix_j), [matrix_j2]"=&f"(matrix_j2),
         [samples0]"=&f"(samples0), [samples1]"=&f"(samples1),
         [samples2]"=&f"(samples2), [samples3]"=&f"(samples3),
         [v0]"=&f"(v0), [v1]"=&f"(v1), [v2]"=&f"(v2), [v3]"=&f"(v3),
         [v4]"=&f"(v4), [v5]"=&f"(v5), [v6]"=&f"(v6), [v7]"=&f"(v7),
         [samples_x]"=&r"(samples_x), [matrix_p]"=&r"(matrix_p),
         [samples_end]"=&r"(samples_end), [samples_sw]"=&r"(samples_sw),
         [i1]"=&r"(i1), [i2]"=&r"(i2), [i3]"=&r"(i3), [i]"=&r"(i),
         [j]"=&r"(j), [len]"+r"(len)
        :[samples]"r"(samples), [matrix]"r"(matrix),
         [in_ch]"r"(in_ch), [out_ch]"r"(out_ch)
        :"memory"
    );
}
#endif
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized AC-3 DSP routines into the context.
 *
 * @param c         AC3DSPContext whose function pointers are overridden
 * @param bit_exact unused here; kept for API symmetry with other inits
 */
void ff_ac3dsp_init_mips(AC3DSPContext *c, int bit_exact)
{
#if HAVE_INLINE_ASM
#if HAVE_MIPSDSPR1
    /* integer bit-allocation helpers need the DSP ASE rev 1 */
    c->update_bap_counts   = ac3_update_bap_counts_mips;
    c->bit_alloc_calc_bap  = ac3_bit_alloc_calc_bap_mips;
#endif
#if HAVE_MIPSFPU && HAVE_MIPS32R2
    /* float conversion and downmix need the FPU on MIPS32r2 */
    c->downmix          = ac3_downmix_mips;
    c->float_to_fixed24 = float_to_fixed24_mips;
#endif
#endif
}

View File

@@ -0,0 +1,215 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* various filters for ACELP-based codecs optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/acelp_filters.c
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavcodec/acelp_filters.h"
#if HAVE_INLINE_ASM
/**
 * Fractional interpolation of a float sample vector (MIPS FPU version;
 * reference implementation in libavcodec/acelp_filters.c).
 *
 * For each output sample n, accumulates filter taps forward from in[n]
 * and backward from in[n-1], stepping through filter_coeffs with a
 * stride of precision elements.
 *
 * Fix: the accumulator operand [v] was declared as a write-only output
 * ("=&f") even though the asm reads it as a madd.s source operand.
 * With "=&f", GCC is not required to load v's current value (0.0f) into
 * the allocated register before the asm block, so the accumulation could
 * start from a garbage register. The correct constraint for a
 * read-modify-write operand is "+&f" (earlyclobber, since %[v] is
 * written before the last input reads).
 *
 * @param out           output interpolated samples
 * @param in            input samples (in[n-1] is read, so in must have
 *                      valid history before index 0)
 * @param filter_coeffs interpolation filter coefficients
 * @param precision     sub-sample precision (number of filter phases)
 * @param frac_pos      fractional position, 0..precision-1
 * @param filter_length number of taps per direction
 * @param length        number of output samples
 */
static void ff_acelp_interpolatef_mips(float *out, const float *in,
                                       const float *filter_coeffs, int precision,
                                       int frac_pos, int filter_length, int length)
{
    int n, i;
    int prec = precision * 4;            /* tap stride in bytes */
    int fc_offset = precision - frac_pos;
    float in_val_p, in_val_m, fc_val_p, fc_val_m;

    for (n = 0; n < length; n++) {
        /* four running pointers minimize the address arithmetic done in
         * the inner loop */
        const float *p_in_p = &in[n];
        const float *p_in_m = &in[n-1];
        const float *p_filter_coeffs_p = &filter_coeffs[frac_pos];
        const float *p_filter_coeffs_m = filter_coeffs + fc_offset;
        float v = 0;

        for (i = 0; i < filter_length; i++) {
            __asm__ volatile (
                "lwc1 %[in_val_p], 0(%[p_in_p]) \n\t"
                "lwc1 %[fc_val_p], 0(%[p_filter_coeffs_p]) \n\t"
                "lwc1 %[in_val_m], 0(%[p_in_m]) \n\t"
                "lwc1 %[fc_val_m], 0(%[p_filter_coeffs_m]) \n\t"
                "addiu %[p_in_p], %[p_in_p], 4 \n\t"
                "madd.s %[v],%[v], %[in_val_p],%[fc_val_p] \n\t"
                "addiu %[p_in_m], %[p_in_m], -4 \n\t"
                "addu %[p_filter_coeffs_p], %[p_filter_coeffs_p], %[prec] \n\t"
                "addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \n\t"
                "madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \n\t"
                : [v] "+&f" (v), [p_in_p] "+r" (p_in_p), [p_in_m] "+r" (p_in_m),
                  [p_filter_coeffs_p] "+r" (p_filter_coeffs_p),
                  [in_val_p] "=&f" (in_val_p), [in_val_m] "=&f" (in_val_m),
                  [fc_val_p] "=&f" (fc_val_p), [fc_val_m] "=&f" (fc_val_m),
                  [p_filter_coeffs_m] "+r" (p_filter_coeffs_m)
                : [prec] "r" (prec)
            );
        }
        out[n] = v;
    }
}
/**
 * Apply a 2nd-order transfer function (two zeros, two poles) to n samples
 * (MIPS FPU version; reference in libavcodec/acelp_filters.c).
 *
 * $f0/$f1 carry the filter memory mem[0]/mem[1]; $f2/$f3 the pole
 * coefficients; $f4/$f5 the zero coefficients. Each unrolled step computes
 * tmp = gain*in - pole0*state0 - pole1*state1 (via msub.s/sub.s) and
 * out = tmp + zero0*state0 + zero1*state1 (via madd.s), then the states
 * rotate through the temporary registers. mem[] is written back on exit.
 *
 * NOTE(review): the loop body consumes exactly 8 samples and decrements n
 * by 8 with a bnez test, so n is presumably a nonzero multiple of 8 (the
 * initial blez handles n <= 0) — confirm against callers.
 * NOTE(review): [gain] is constrained "+f" though the asm never writes it.
 */
static void ff_acelp_apply_order_2_transfer_function_mips(float *out, const float *in,
                                                          const float zero_coeffs[2],
                                                          const float pole_coeffs[2],
                                                          float gain, float mem[2], int n)
{
    /**
     * loop is unrolled eight times
     */
    __asm__ volatile (
        "lwc1 $f0, 0(%[mem]) \n\t"
        "blez %[n], ff_acelp_apply_order_2_transfer_function_end%= \n\t"
        "lwc1 $f1, 4(%[mem]) \n\t"
        "lwc1 $f2, 0(%[pole_coeffs]) \n\t"
        "lwc1 $f3, 4(%[pole_coeffs]) \n\t"
        "lwc1 $f4, 0(%[zero_coeffs]) \n\t"
        "lwc1 $f5, 4(%[zero_coeffs]) \n\t"
        "ff_acelp_apply_order_2_transfer_function_madd%=: \n\t"
        "lwc1 $f6, 0(%[in]) \n\t"
        "mul.s $f9, $f3, $f1 \n\t"
        "mul.s $f7, $f2, $f0 \n\t"
        "msub.s $f7, $f7, %[gain], $f6 \n\t"
        "sub.s $f7, $f7, $f9 \n\t"
        "madd.s $f8, $f7, $f4, $f0 \n\t"
        "madd.s $f8, $f8, $f5, $f1 \n\t"
        "lwc1 $f11, 4(%[in]) \n\t"
        "mul.s $f12, $f3, $f0 \n\t"
        "mul.s $f13, $f2, $f7 \n\t"
        "msub.s $f13, $f13, %[gain], $f11 \n\t"
        "sub.s $f13, $f13, $f12 \n\t"
        "madd.s $f14, $f13, $f4, $f7 \n\t"
        "madd.s $f14, $f14, $f5, $f0 \n\t"
        "swc1 $f8, 0(%[out]) \n\t"
        "lwc1 $f6, 8(%[in]) \n\t"
        "mul.s $f9, $f3, $f7 \n\t"
        "mul.s $f15, $f2, $f13 \n\t"
        "msub.s $f15, $f15, %[gain], $f6 \n\t"
        "sub.s $f15, $f15, $f9 \n\t"
        "madd.s $f8, $f15, $f4, $f13 \n\t"
        "madd.s $f8, $f8, $f5, $f7 \n\t"
        "swc1 $f14, 4(%[out]) \n\t"
        "lwc1 $f11, 12(%[in]) \n\t"
        "mul.s $f12, $f3, $f13 \n\t"
        "mul.s $f16, $f2, $f15 \n\t"
        "msub.s $f16, $f16, %[gain], $f11 \n\t"
        "sub.s $f16, $f16, $f12 \n\t"
        "madd.s $f14, $f16, $f4, $f15 \n\t"
        "madd.s $f14, $f14, $f5, $f13 \n\t"
        "swc1 $f8, 8(%[out]) \n\t"
        "lwc1 $f6, 16(%[in]) \n\t"
        "mul.s $f9, $f3, $f15 \n\t"
        "mul.s $f7, $f2, $f16 \n\t"
        "msub.s $f7, $f7, %[gain], $f6 \n\t"
        "sub.s $f7, $f7, $f9 \n\t"
        "madd.s $f8, $f7, $f4, $f16 \n\t"
        "madd.s $f8, $f8, $f5, $f15 \n\t"
        "swc1 $f14, 12(%[out]) \n\t"
        "lwc1 $f11, 20(%[in]) \n\t"
        "mul.s $f12, $f3, $f16 \n\t"
        "mul.s $f13, $f2, $f7 \n\t"
        "msub.s $f13, $f13, %[gain], $f11 \n\t"
        "sub.s $f13, $f13, $f12 \n\t"
        "madd.s $f14, $f13, $f4, $f7 \n\t"
        "madd.s $f14, $f14, $f5, $f16 \n\t"
        "swc1 $f8, 16(%[out]) \n\t"
        "lwc1 $f6, 24(%[in]) \n\t"
        "mul.s $f9, $f3, $f7 \n\t"
        "mul.s $f15, $f2, $f13 \n\t"
        "msub.s $f15, $f15, %[gain], $f6 \n\t"
        "sub.s $f1, $f15, $f9 \n\t"
        "madd.s $f8, $f1, $f4, $f13 \n\t"
        "madd.s $f8, $f8, $f5, $f7 \n\t"
        "swc1 $f14, 20(%[out]) \n\t"
        "lwc1 $f11, 28(%[in]) \n\t"
        "mul.s $f12, $f3, $f13 \n\t"
        "mul.s $f16, $f2, $f1 \n\t"
        "msub.s $f16, $f16, %[gain], $f11 \n\t"
        "sub.s $f0, $f16, $f12 \n\t"
        "madd.s $f14, $f0, $f4, $f1 \n\t"
        "madd.s $f14, $f14, $f5, $f13 \n\t"
        "swc1 $f8, 24(%[out]) \n\t"
        "addiu %[out], 32 \n\t"
        "addiu %[in], 32 \n\t"
        "addiu %[n], -8 \n\t"
        "swc1 $f14, -4(%[out]) \n\t"
        "bnez %[n], ff_acelp_apply_order_2_transfer_function_madd%= \n\t"
        "swc1 $f1, 4(%[mem]) \n\t"
        "swc1 $f0, 0(%[mem]) \n\t"
        "ff_acelp_apply_order_2_transfer_function_end%=: \n\t"
        : [out] "+r" (out),
          [in] "+r" (in), [gain] "+f" (gain),
          [n] "+r" (n), [mem] "+r" (mem)
        : [zero_coeffs] "r" (zero_coeffs),
          [pole_coeffs] "r" (pole_coeffs)
        : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5",
          "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
          "$f12", "$f13", "$f14", "$f15", "$f16"
    );
}
#endif /* HAVE_INLINE_ASM */
/**
 * Wire the MIPS-optimized ACELP filter routines into the context.
 *
 * @param c ACELPFContext whose function pointers are overridden
 */
void ff_acelp_filter_init_mips(ACELPFContext *c)
{
#if HAVE_INLINE_ASM
    /* interpolation and order-2 transfer function, FPU inline-asm versions */
    c->acelp_apply_order_2_transfer_function = ff_acelp_apply_order_2_transfer_function_mips;
    c->acelp_interpolatef                    = ff_acelp_interpolatef_mips;
#endif
}

View File

@@ -0,0 +1,101 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* adaptive and fixed codebook vector operations for ACELP-based codecs
* optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/acelp_vectors.c
*/
#include "config.h"
#include "libavcodec/acelp_vectors.h"
#if HAVE_INLINE_ASM
/**
 * Weighted sum of two float vectors (MIPS FPU version):
 * out[i] = weight_coeff_a * in_a[i] + weight_coeff_b * in_b[i]
 * (reference implementation in libavcodec/acelp_vectors.c).
 *
 * out may alias in_a/in_b element-wise; each pair is stored after being
 * read. length <= 0 is handled by the initial blez.
 *
 * NOTE(review): the loop consumes two elements per iteration and compares
 * in_a against a_end for equality, so length is presumably even —
 * confirm against callers.
 */
static void ff_weighted_vector_sumf_mips(
    float *out, const float *in_a, const float *in_b,
    float weight_coeff_a, float weight_coeff_b, int length)
{
    const float *a_end = in_a + length;  /* one past the last element */

    /* loop unrolled two times */
    __asm__ volatile (
        "blez %[length], ff_weighted_vector_sumf_end%= \n\t"
        "ff_weighted_vector_sumf_madd%=: \n\t"
        "lwc1 $f0, 0(%[in_a]) \n\t"
        "lwc1 $f3, 4(%[in_a]) \n\t"
        "lwc1 $f1, 0(%[in_b]) \n\t"
        "lwc1 $f4, 4(%[in_b]) \n\t"
        "mul.s $f2, %[weight_coeff_a], $f0 \n\t"
        "mul.s $f5, %[weight_coeff_a], $f3 \n\t"
        "madd.s $f2, $f2, %[weight_coeff_b], $f1 \n\t"
        "madd.s $f5, $f5, %[weight_coeff_b], $f4 \n\t"
        "addiu %[in_a], 8 \n\t"
        "addiu %[in_b], 8 \n\t"
        "swc1 $f2, 0(%[out]) \n\t"
        "swc1 $f5, 4(%[out]) \n\t"
        "addiu %[out], 8 \n\t"
        "bne %[in_a], %[a_end], ff_weighted_vector_sumf_madd%= \n\t"
        "ff_weighted_vector_sumf_end%=: \n\t"
        : [out] "+r" (out), [in_a] "+r" (in_a), [in_b] "+r" (in_b)
        : [weight_coeff_a] "f" (weight_coeff_a),
          [weight_coeff_b] "f" (weight_coeff_b),
          [length] "r" (length), [a_end]"r"(a_end)
        : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5"
    );
}
#endif /* HAVE_INLINE_ASM */
/**
 * Wire the MIPS-optimized ACELP vector routine into the context.
 *
 * @param c ACELPVContext whose function pointer is overridden
 */
void ff_acelp_vectors_init_mips(ACELPVContext *c)
{
#if HAVE_INLINE_ASM
    /* weighted sum of two float vectors, FPU inline-asm version */
    c->weighted_vector_sumf = ff_weighted_vector_sumf_mips;
#endif
}

View File

@@ -0,0 +1,187 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/amrwbdec.c
*/
#include "libavutil/avutil.h"
#include "libavcodec/amrwbdata.h"
#include "amrwbdec_mips.h"
#if HAVE_INLINE_ASM
/**
 * High-band FIR filter for the AMR-WB decoder (MIPS FPU version;
 * reference in libavcodec/amrwbdec.c).
 *
 * Concatenates the HB_FIR_SIZE past samples (mem) with the current
 * subframe (in) into a local buffer, then computes one inner product per
 * output sample. The inner product is fully unrolled in asm: coefficient
 * loads run from offset 0 to 120 bytes, i.e. 31 taps, which presumably
 * means HB_FIR_SIZE == 30 — confirm against amrwbdata.h. Finally the
 * last HB_FIR_SIZE samples are saved back into mem for the next call.
 *
 * @param out      output filtered samples, AMRWB_SFR_SIZE_16k entries
 * @param fir_coef HB_FIR_SIZE + 1 filter coefficients
 * @param mem      filter history, updated in place
 * @param in       input samples, AMRWB_SFR_SIZE_16k entries
 */
void hb_fir_filter_mips(float *out, const float fir_coef[HB_FIR_SIZE + 1],
                        float mem[HB_FIR_SIZE], const float *in)
{
    int i;
    float data[AMRWB_SFR_SIZE_16k + HB_FIR_SIZE]; // past and current samples

    memcpy(data, mem, HB_FIR_SIZE * sizeof(float));
    memcpy(data + HB_FIR_SIZE, in, AMRWB_SFR_SIZE_16k * sizeof(float));

    for (i = 0; i < AMRWB_SFR_SIZE_16k; i++) {
        float output;
        float * p_data = (data+i);

        /**
         * inner loop is entirely unrolled and instructions are scheduled
         * to minimize pipeline stall
         */
        __asm__ volatile(
            "mtc1 $zero, %[output] \n\t"
            "lwc1 $f0, 0(%[p_data]) \n\t"
            "lwc1 $f1, 0(%[fir_coef]) \n\t"
            "lwc1 $f2, 4(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f3, 4(%[fir_coef]) \n\t"
            "lwc1 $f4, 8(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f5, 8(%[fir_coef]) \n\t"
            "lwc1 $f0, 12(%[p_data]) \n\t"
            "lwc1 $f1, 12(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f2, 16(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f3, 16(%[fir_coef]) \n\t"
            "lwc1 $f4, 20(%[p_data]) \n\t"
            "lwc1 $f5, 20(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f0, 24(%[p_data]) \n\t"
            "lwc1 $f1, 24(%[fir_coef]) \n\t"
            "lwc1 $f2, 28(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f3, 28(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f4, 32(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f5, 32(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f0, 36(%[p_data]) \n\t"
            "lwc1 $f1, 36(%[fir_coef]) \n\t"
            "lwc1 $f2, 40(%[p_data]) \n\t"
            "lwc1 $f3, 40(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f4, 44(%[p_data]) \n\t"
            "lwc1 $f5, 44(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f0, 48(%[p_data]) \n\t"
            "lwc1 $f1, 48(%[fir_coef]) \n\t"
            "lwc1 $f2, 52(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f3, 52(%[fir_coef]) \n\t"
            "lwc1 $f4, 56(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f5, 56(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f0, 60(%[p_data]) \n\t"
            "lwc1 $f1, 60(%[fir_coef]) \n\t"
            "lwc1 $f2, 64(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f3, 64(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f4, 68(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f5, 68(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f0, 72(%[p_data]) \n\t"
            "lwc1 $f1, 72(%[fir_coef]) \n\t"
            "lwc1 $f2, 76(%[p_data]) \n\t"
            "lwc1 $f3, 76(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f4, 80(%[p_data]) \n\t"
            "lwc1 $f5, 80(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f0, 84(%[p_data]) \n\t"
            "lwc1 $f1, 84(%[fir_coef]) \n\t"
            "lwc1 $f2, 88(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f3, 88(%[fir_coef]) \n\t"
            "lwc1 $f4, 92(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f5, 92(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f0, 96(%[p_data]) \n\t"
            "lwc1 $f1, 96(%[fir_coef]) \n\t"
            "lwc1 $f2, 100(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f3, 100(%[fir_coef]) \n\t"
            "lwc1 $f4, 104(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f5, 104(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f0, 108(%[p_data]) \n\t"
            "lwc1 $f1, 108(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "lwc1 $f2, 112(%[p_data]) \n\t"
            "lwc1 $f3, 112(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            "lwc1 $f4, 116(%[p_data]) \n\t"
            "lwc1 $f5, 116(%[fir_coef]) \n\t"
            "lwc1 $f0, 120(%[p_data]) \n\t"
            "madd.s %[output], %[output], $f2, $f3 \n\t"
            "lwc1 $f1, 120(%[fir_coef]) \n\t"
            "madd.s %[output], %[output], $f4, $f5 \n\t"
            "madd.s %[output], %[output], $f0, $f1 \n\t"
            : [output]"=&f"(output)
            : [fir_coef]"r"(fir_coef), [p_data]"r"(p_data)
            : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5"
        );
        out[i] = output;
    }
    /* save the tail of the concatenated buffer as history for next call */
    memcpy(mem, data + AMRWB_SFR_SIZE_16k, HB_FIR_SIZE * sizeof(float));
}
#endif /* HAVE_INLINE_ASM */

View File

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/amrwbdec.c
*/
#ifndef AVCODEC_AMRWBDEC_MIPS_H
#define AVCODEC_AMRWBDEC_MIPS_H
#include "config.h"
#if HAVE_MIPSFPU && HAVE_INLINE_ASM
/* MIPS-FPU optimized high-band FIR filter; drop-in replacement for the
 * generic hb_fir_filter() in libavcodec/amrwbdec.c (see @file reference). */
void hb_fir_filter_mips(float *out, const float fir_coef[],
float mem[], const float *in);
/* Redirect the generic decoder to the optimized implementation. */
#define hb_fir_filter hb_fir_filter_mips
#endif
#endif /* AVCODEC_AMRWBDEC_MIPS_H */

View File

@@ -0,0 +1,286 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* various filters for CELP-based codecs optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/celp_filters.c
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavcodec/celp_filters.h"
#if HAVE_INLINE_ASM
/**
 * LP synthesis filter, single precision, MIPS FPU optimized.
 * Computes out[n] = in[n] - sum_{i=1..filter_length} filter_coeffs[i-1] * out[n-i]
 * (see the generic version in libavcodec/celp_filters.c, per the @file note).
 *
 * out must have filter history in out[-filter_length .. -1].
 * The main loop produces four output samples per iteration; samples that do
 * not fit the 4x unrolling are handled by the scalar tail loop at the end.
 *
 * NOTE(review): the unrolled path reads out[-4..-1] and pairs coefficients
 * starting at index 4 (inner loop i = 5, 7, ...), so it assumes
 * filter_length is odd and >= 5 — confirm against callers (AMR uses 16).
 */
static void ff_celp_lp_synthesis_filterf_mips(float *out,
const float *filter_coeffs,
const float* in, int buffer_length,
int filter_length)
{
int i,n;
float out0, out1, out2, out3;
float old_out0, old_out1, old_out2, old_out3;
float a,b,c;
const float *p_filter_coeffs;
float *p_out;
/* Combined coefficients a, b, c resolve the data dependency between the
 * four outputs of one unrolled iteration: out1..out3 depend on out0..out2,
 * which are only known at the end of the iteration. */
a = filter_coeffs[0];
b = filter_coeffs[1];
c = filter_coeffs[2];
b -= filter_coeffs[0] * filter_coeffs[0];
c -= filter_coeffs[1] * filter_coeffs[0];
c -= filter_coeffs[0] * b;
/* Prime the history with the last four past output samples. */
old_out0 = out[-4];
old_out1 = out[-3];
old_out2 = out[-2];
old_out3 = out[-1];
for (n = 0; n <= buffer_length - 4; n+=4) {
p_filter_coeffs = filter_coeffs;
p_out = out;
out0 = in[0];
out1 = in[1];
out2 = in[2];
out3 = in[3];
/* Apply coefficients 0..3 against the four history samples. */
__asm__ volatile(
"lwc1 $f2, 8(%[filter_coeffs]) \n\t"
"lwc1 $f1, 4(%[filter_coeffs]) \n\t"
"lwc1 $f0, 0(%[filter_coeffs]) \n\t"
"nmsub.s %[out0], %[out0], $f2, %[old_out1] \n\t"
"nmsub.s %[out1], %[out1], $f2, %[old_out2] \n\t"
"nmsub.s %[out2], %[out2], $f2, %[old_out3] \n\t"
"lwc1 $f3, 12(%[filter_coeffs]) \n\t"
"nmsub.s %[out0], %[out0], $f1, %[old_out2] \n\t"
"nmsub.s %[out1], %[out1], $f1, %[old_out3] \n\t"
"nmsub.s %[out2], %[out2], $f3, %[old_out2] \n\t"
"nmsub.s %[out0], %[out0], $f0, %[old_out3] \n\t"
"nmsub.s %[out3], %[out3], $f3, %[old_out3] \n\t"
"nmsub.s %[out1], %[out1], $f3, %[old_out1] \n\t"
"nmsub.s %[out0], %[out0], $f3, %[old_out0] \n\t"
: [out0]"+f"(out0), [out1]"+f"(out1),
[out2]"+f"(out2), [out3]"+f"(out3)
: [old_out0]"f"(old_out0), [old_out1]"f"(old_out1),
[old_out2]"f"(old_out2), [old_out3]"f"(old_out3),
[filter_coeffs]"r"(filter_coeffs)
: "$f0", "$f1", "$f2", "$f3", "$f4"
);
/* Remaining coefficients, two per iteration, walking backwards
 * through previously computed output samples via p_out. */
for (i = 5; i <= filter_length; i += 2) {
__asm__ volatile(
"lwc1 %[old_out3], -20(%[p_out]) \n\t"
"lwc1 $f5, 16(%[p_filter_coeffs]) \n\t"
"addiu %[p_out], -8 \n\t"
"addiu %[p_filter_coeffs], 8 \n\t"
"nmsub.s %[out1], %[out1], $f5, %[old_out0] \n\t"
"nmsub.s %[out3], %[out3], $f5, %[old_out2] \n\t"
"lwc1 $f4, 12(%[p_filter_coeffs]) \n\t"
"lwc1 %[old_out2], -16(%[p_out]) \n\t"
"nmsub.s %[out0], %[out0], $f5, %[old_out3] \n\t"
"nmsub.s %[out2], %[out2], $f5, %[old_out1] \n\t"
"nmsub.s %[out1], %[out1], $f4, %[old_out3] \n\t"
"nmsub.s %[out3], %[out3], $f4, %[old_out1] \n\t"
"mov.s %[old_out1], %[old_out3] \n\t"
"nmsub.s %[out0], %[out0], $f4, %[old_out2] \n\t"
"nmsub.s %[out2], %[out2], $f4, %[old_out0] \n\t"
: [out0]"+f"(out0), [out1]"+f"(out1),
[out2]"+f"(out2), [out3]"+f"(out3), [old_out0]"+f"(old_out0),
[old_out1]"+f"(old_out1), [old_out2]"+f"(old_out2),
[old_out3]"+f"(old_out3),[p_filter_coeffs]"+r"(p_filter_coeffs),
[p_out]"+r"(p_out)
:
: "$f4", "$f5"
);
/* Rotate the 4-sample history window for the next pair. */
FFSWAP(float, old_out0, old_out2);
}
/* Final intra-block dependency resolution using the combined
 * coefficients a, b, c computed above. */
__asm__ volatile(
"nmsub.s %[out3], %[out3], %[a], %[out2] \n\t"
"nmsub.s %[out2], %[out2], %[a], %[out1] \n\t"
"nmsub.s %[out3], %[out3], %[b], %[out1] \n\t"
"nmsub.s %[out1], %[out1], %[a], %[out0] \n\t"
"nmsub.s %[out2], %[out2], %[b], %[out0] \n\t"
"nmsub.s %[out3], %[out3], %[c], %[out0] \n\t"
: [out0]"+f"(out0), [out1]"+f"(out1),
[out2]"+f"(out2), [out3]"+f"(out3)
: [a]"f"(a), [b]"f"(b), [c]"f"(c)
);
out[0] = out0;
out[1] = out1;
out[2] = out2;
out[3] = out3;
old_out0 = out0;
old_out1 = out1;
old_out2 = out2;
old_out3 = out3;
out += 4;
in += 4;
}
/* Rewind the pointers advanced by the unrolled loop; the tail loop below
 * indexes with n directly. */
out -= n;
in -= n;
/* Scalar tail: handles buffer_length % 4 remaining samples. */
for (; n < buffer_length; n++) {
float out_val, out_val_i, fc_val;
p_filter_coeffs = filter_coeffs;
p_out = &out[n];
out_val = in[n];
for (i = 1; i <= filter_length; i++) {
__asm__ volatile(
"lwc1 %[fc_val], 0(%[p_filter_coeffs]) \n\t"
"lwc1 %[out_val_i], -4(%[p_out]) \n\t"
"addiu %[p_filter_coeffs], 4 \n\t"
"addiu %[p_out], -4 \n\t"
"nmsub.s %[out_val], %[out_val], %[fc_val], %[out_val_i] \n\t"
: [fc_val]"=&f"(fc_val), [out_val]"+f"(out_val),
[out_val_i]"=&f"(out_val_i), [p_out]"+r"(p_out),
[p_filter_coeffs]"+r"(p_filter_coeffs)
);
}
out[n] = out_val;
}
}
/**
 * LP zero synthesis (FIR) filter, single precision, MIPS FPU optimized.
 * Computes out[n] = in[n] + sum_{i=1..filter_length} filter_coeffs[i-1] * in[n-i]
 * (see the generic version in libavcodec/celp_filters.c, per the @file note).
 *
 * in must have filter history in in[-filter_length .. -1].
 *
 * NOTE(review): the outer loop steps n by 8 and the inner asm loop consumes
 * two coefficients per pass ("addiu %[i], -2" / bgtz), so this assumes
 * buffer_length is a multiple of 8 and filter_length is even — confirm
 * against callers before reusing elsewhere.
 */
static void ff_celp_lp_zero_synthesis_filterf_mips(float *out,
const float *filter_coeffs,
const float *in, int buffer_length,
int filter_length)
{
int i,n;
float sum_out8, sum_out7, sum_out6, sum_out5, sum_out4, fc_val;
float sum_out3, sum_out2, sum_out1;
const float *p_filter_coeffs, *p_in;
for (n = 0; n < buffer_length; n+=8) {
p_in = &in[n];
p_filter_coeffs = filter_coeffs;
/* Initialize the eight accumulators with the raw input samples
 * (the i == 0 term of the FIR sum). */
sum_out8 = in[n+7];
sum_out7 = in[n+6];
sum_out6 = in[n+5];
sum_out5 = in[n+4];
sum_out4 = in[n+3];
sum_out3 = in[n+2];
sum_out2 = in[n+1];
sum_out1 = in[n];
i = filter_length;
/* i is always greater than 0
 * outer loop is unrolled eight times so there is less memory access
 * inner loop is unrolled two times
 */
__asm__ volatile(
"filt_lp_inner%=: \n\t"
"lwc1 %[fc_val], 0(%[p_filter_coeffs]) \n\t"
"lwc1 $f7, 6*4(%[p_in]) \n\t"
"lwc1 $f6, 5*4(%[p_in]) \n\t"
"lwc1 $f5, 4*4(%[p_in]) \n\t"
"lwc1 $f4, 3*4(%[p_in]) \n\t"
"lwc1 $f3, 2*4(%[p_in]) \n\t"
"lwc1 $f2, 4(%[p_in]) \n\t"
"lwc1 $f1, 0(%[p_in]) \n\t"
"lwc1 $f0, -4(%[p_in]) \n\t"
"addiu %[i], -2 \n\t"
"madd.s %[sum_out8], %[sum_out8], %[fc_val], $f7 \n\t"
"madd.s %[sum_out7], %[sum_out7], %[fc_val], $f6 \n\t"
"madd.s %[sum_out6], %[sum_out6], %[fc_val], $f5 \n\t"
"madd.s %[sum_out5], %[sum_out5], %[fc_val], $f4 \n\t"
"madd.s %[sum_out4], %[sum_out4], %[fc_val], $f3 \n\t"
"madd.s %[sum_out3], %[sum_out3], %[fc_val], $f2 \n\t"
"madd.s %[sum_out2], %[sum_out2], %[fc_val], $f1 \n\t"
"madd.s %[sum_out1], %[sum_out1], %[fc_val], $f0 \n\t"
"lwc1 %[fc_val], 4(%[p_filter_coeffs]) \n\t"
"lwc1 $f7, -8(%[p_in]) \n\t"
"addiu %[p_filter_coeffs], 8 \n\t"
"addiu %[p_in], -8 \n\t"
"madd.s %[sum_out8], %[sum_out8], %[fc_val], $f6 \n\t"
"madd.s %[sum_out7], %[sum_out7], %[fc_val], $f5 \n\t"
"madd.s %[sum_out6], %[sum_out6], %[fc_val], $f4 \n\t"
"madd.s %[sum_out5], %[sum_out5], %[fc_val], $f3 \n\t"
"madd.s %[sum_out4], %[sum_out4], %[fc_val], $f2 \n\t"
"madd.s %[sum_out3], %[sum_out3], %[fc_val], $f1 \n\t"
"madd.s %[sum_out2], %[sum_out2], %[fc_val], $f0 \n\t"
"madd.s %[sum_out1], %[sum_out1], %[fc_val], $f7 \n\t"
"bgtz %[i], filt_lp_inner%= \n\t"
: [sum_out8]"+f"(sum_out8), [sum_out7]"+f"(sum_out7),
[sum_out6]"+f"(sum_out6), [sum_out5]"+f"(sum_out5),
[sum_out4]"+f"(sum_out4), [sum_out3]"+f"(sum_out3),
[sum_out2]"+f"(sum_out2), [sum_out1]"+f"(sum_out1),
[fc_val]"=&f"(fc_val), [p_filter_coeffs]"+r"(p_filter_coeffs),
[p_in]"+r"(p_in), [i]"+r"(i)
:
: "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7"
);
out[n+7] = sum_out8;
out[n+6] = sum_out7;
out[n+5] = sum_out6;
out[n+4] = sum_out5;
out[n+3] = sum_out4;
out[n+2] = sum_out3;
out[n+1] = sum_out2;
out[n] = sum_out1;
}
}
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized CELP filter routines into the context.
 * Only takes effect when inline assembly is available at build time.
 */
void ff_celp_filter_init_mips(CELPFContext *c)
{
#if HAVE_INLINE_ASM
    c->celp_lp_zero_synthesis_filterf = ff_celp_lp_zero_synthesis_filterf_mips;
    c->celp_lp_synthesis_filterf      = ff_celp_lp_synthesis_filterf_mips;
#endif
}

View File

@@ -0,0 +1,89 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* Math operations optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/celp_math.c
*/
#include "config.h"
#include "libavcodec/celp_math.h"
#if HAVE_INLINE_ASM
/**
 * Single-precision dot product, MIPS FPU optimized.
 * Returns sum over i in [0, length) of a[i] * b[i].
 * Returns +0.0f when length <= 0 (the blez guard skips the loop; mtc1 of
 * $zero gives the all-zero-bits float, i.e. 0.0f).
 */
static float ff_dot_productf_mips(const float* a, const float* b,
int length)
{
float sum;
const float* a_end = a + length;
__asm__ volatile (
"mtc1 $zero, %[sum] \n\t"
"blez %[length], ff_dot_productf_end%= \n\t"
"ff_dot_productf_madd%=: \n\t"
"lwc1 $f2, 0(%[a]) \n\t"
"lwc1 $f1, 0(%[b]) \n\t"
"addiu %[a], %[a], 4 \n\t"
"addiu %[b], %[b], 4 \n\t"
"madd.s %[sum], %[sum], $f1, $f2 \n\t"
"bne %[a], %[a_end], ff_dot_productf_madd%= \n\t"
"ff_dot_productf_end%=: \n\t"
: [sum] "=&f" (sum), [a] "+r" (a), [b] "+r" (b)
: [a_end]"r"(a_end), [length] "r" (length)
: "$f1", "$f2"
);
return sum;
}
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized CELP math routines into the context.
 * Only takes effect when inline assembly is available at build time.
 */
void ff_celp_math_init_mips(CELPMContext *c)
{
#if HAVE_INLINE_ASM
c->dot_productf = ff_dot_productf_mips;
#endif
}

View File

@@ -0,0 +1,248 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Bojan Zivkovic (bojan@mips.com)
*
* Compute antialias function optimised for MIPS fixed-point architecture
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/mpegaudiodec.c
*/
#ifndef AVCODEC_MIPS_COMPUTE_ANTIALIAS_FIXED_H
#define AVCODEC_MIPS_COMPUTE_ANTIALIAS_FIXED_H
#if HAVE_INLINE_ASM
/**
 * Fixed-point MP3 antialias butterflies, MIPS DSP-ASE optimized.
 * Processes 8 butterflies per 18-sample subband boundary in g->sb_hybrid,
 * using the four DSP accumulators ($ac0-$ac3) to overlap four
 * multiply-accumulate chains (see generic code in libavcodec/mpegaudiodec.c,
 * per the @file note).
 *
 * NOTE(review): `int MAX_lo = 0xffffffff;` relies on the implementation-
 * defined unsigned->signed conversion (yielding -1 on two's complement);
 * it is only ever fed to mtlo to preload the accumulator low word for
 * rounding, so the bit pattern is what matters — confirm intent.
 */
static void compute_antialias_mips_fixed(MPADecodeContext *s,
GranuleDef *g)
{
int32_t *ptr, *csa;
int n, i;
int MAX_lo = 0xffffffff;
/* we antialias only "long" bands */
if (g->block_type == 2) {
if (!g->switch_point)
return;
/* XXX: check this for 8000Hz case */
n = 1;
} else {
n = SBLIMIT - 1;
}
ptr = g->sb_hybrid + 18;
for(i = n;i > 0;i--) {
int tmp0, tmp1, tmp2, tmp00, tmp11;
int temp_reg1, temp_reg2, temp_reg3, temp_reg4, temp_reg5, temp_reg6;
csa = &csa_table[0][0];
/**
* instructions are scheduled to minimize pipeline stall.
*/
__asm__ volatile (
"lw %[tmp0], -1*4(%[ptr]) \n\t"
"lw %[tmp1], 0*4(%[ptr]) \n\t"
"lw %[temp_reg1], 0*4(%[csa]) \n\t"
"lw %[temp_reg2], 2*4(%[csa]) \n\t"
"add %[tmp2], %[tmp0], %[tmp1] \n\t"
"lw %[temp_reg3], 3*4(%[csa]) \n\t"
"mult $ac0, %[tmp2], %[temp_reg1] \n\t"
"mult $ac1, %[tmp2], %[temp_reg1] \n\t"
"lw %[tmp00], -2*4(%[ptr]) \n\t"
"lw %[tmp11], 1*4(%[ptr]) \n\t"
"lw %[temp_reg4], 4*4(%[csa]) \n\t"
"mtlo %[MAX_lo], $ac0 \n\t"
"mtlo $zero, $ac1 \n\t"
"msub $ac0, %[tmp1], %[temp_reg2] \n\t"
"madd $ac1, %[tmp0], %[temp_reg3] \n\t"
"add %[tmp2], %[tmp00], %[tmp11] \n\t"
"lw %[temp_reg5], 6*4(%[csa]) \n\t"
"mult $ac2, %[tmp2], %[temp_reg4] \n\t"
"mult $ac3, %[tmp2], %[temp_reg4] \n\t"
"mfhi %[temp_reg1], $ac0 \n\t"
"mfhi %[temp_reg2], $ac1 \n\t"
"lw %[temp_reg6], 7*4(%[csa]) \n\t"
"mtlo %[MAX_lo], $ac2 \n\t"
"msub $ac2, %[tmp11], %[temp_reg5] \n\t"
"mtlo $zero, $ac3 \n\t"
"madd $ac3, %[tmp00], %[temp_reg6] \n\t"
"sll %[temp_reg1], %[temp_reg1], 2 \n\t"
"sw %[temp_reg1], -1*4(%[ptr]) \n\t"
"mfhi %[temp_reg4], $ac2 \n\t"
"sll %[temp_reg2], %[temp_reg2], 2 \n\t"
"mfhi %[temp_reg5], $ac3 \n\t"
"sw %[temp_reg2], 0*4(%[ptr]) \n\t"
"lw %[tmp0], -3*4(%[ptr]) \n\t"
"lw %[tmp1], 2*4(%[ptr]) \n\t"
"lw %[temp_reg1], 8*4(%[csa]) \n\t"
"sll %[temp_reg4], %[temp_reg4], 2 \n\t"
"add %[tmp2], %[tmp0], %[tmp1] \n\t"
"sll %[temp_reg5], %[temp_reg5], 2 \n\t"
"mult $ac0, %[tmp2], %[temp_reg1] \n\t"
"mult $ac1, %[tmp2], %[temp_reg1] \n\t"
"sw %[temp_reg4], -2*4(%[ptr]) \n\t"
"sw %[temp_reg5], 1*4(%[ptr]) \n\t"
"lw %[temp_reg2], 10*4(%[csa]) \n\t"
"mtlo %[MAX_lo], $ac0 \n\t"
"lw %[temp_reg3], 11*4(%[csa]) \n\t"
"msub $ac0, %[tmp1], %[temp_reg2] \n\t"
"mtlo $zero, $ac1 \n\t"
"madd $ac1, %[tmp0], %[temp_reg3] \n\t"
"lw %[tmp00], -4*4(%[ptr]) \n\t"
"lw %[tmp11], 3*4(%[ptr]) \n\t"
"mfhi %[temp_reg1], $ac0 \n\t"
"lw %[temp_reg4], 12*4(%[csa]) \n\t"
"mfhi %[temp_reg2], $ac1 \n\t"
"add %[tmp2], %[tmp00], %[tmp11] \n\t"
"mult $ac2, %[tmp2], %[temp_reg4] \n\t"
"mult $ac3, %[tmp2], %[temp_reg4] \n\t"
"lw %[temp_reg5], 14*4(%[csa]) \n\t"
"lw %[temp_reg6], 15*4(%[csa]) \n\t"
"sll %[temp_reg1], %[temp_reg1], 2 \n\t"
"mtlo %[MAX_lo], $ac2 \n\t"
"msub $ac2, %[tmp11], %[temp_reg5] \n\t"
"mtlo $zero, $ac3 \n\t"
"madd $ac3, %[tmp00], %[temp_reg6] \n\t"
"sll %[temp_reg2], %[temp_reg2], 2 \n\t"
"sw %[temp_reg1], -3*4(%[ptr]) \n\t"
"mfhi %[temp_reg4], $ac2 \n\t"
"sw %[temp_reg2], 2*4(%[ptr]) \n\t"
"mfhi %[temp_reg5], $ac3 \n\t"
"lw %[tmp0], -5*4(%[ptr]) \n\t"
"lw %[tmp1], 4*4(%[ptr]) \n\t"
"lw %[temp_reg1], 16*4(%[csa]) \n\t"
"lw %[temp_reg2], 18*4(%[csa]) \n\t"
"add %[tmp2], %[tmp0], %[tmp1] \n\t"
"lw %[temp_reg3], 19*4(%[csa]) \n\t"
"mult $ac0, %[tmp2], %[temp_reg1] \n\t"
"mult $ac1, %[tmp2], %[temp_reg1] \n\t"
"sll %[temp_reg4], %[temp_reg4], 2 \n\t"
"sll %[temp_reg5], %[temp_reg5], 2 \n\t"
"sw %[temp_reg4], -4*4(%[ptr]) \n\t"
"mtlo %[MAX_lo], $ac0 \n\t"
"msub $ac0, %[tmp1], %[temp_reg2] \n\t"
"mtlo $zero, $ac1 \n\t"
"madd $ac1, %[tmp0], %[temp_reg3] \n\t"
"sw %[temp_reg5], 3*4(%[ptr]) \n\t"
"lw %[tmp00], -6*4(%[ptr]) \n\t"
"mfhi %[temp_reg1], $ac0 \n\t"
"lw %[tmp11], 5*4(%[ptr]) \n\t"
"mfhi %[temp_reg2], $ac1 \n\t"
"lw %[temp_reg4], 20*4(%[csa]) \n\t"
"add %[tmp2], %[tmp00], %[tmp11] \n\t"
"lw %[temp_reg5], 22*4(%[csa]) \n\t"
"mult $ac2, %[tmp2], %[temp_reg4] \n\t"
"mult $ac3, %[tmp2], %[temp_reg4] \n\t"
"lw %[temp_reg6], 23*4(%[csa]) \n\t"
"sll %[temp_reg1], %[temp_reg1], 2 \n\t"
"sll %[temp_reg2], %[temp_reg2], 2 \n\t"
"mtlo %[MAX_lo], $ac2 \n\t"
"msub $ac2, %[tmp11], %[temp_reg5] \n\t"
"mtlo $zero, $ac3 \n\t"
"madd $ac3, %[tmp00], %[temp_reg6] \n\t"
"sw %[temp_reg1], -5*4(%[ptr]) \n\t"
"sw %[temp_reg2], 4*4(%[ptr]) \n\t"
"mfhi %[temp_reg4], $ac2 \n\t"
"lw %[tmp0], -7*4(%[ptr]) \n\t"
"mfhi %[temp_reg5], $ac3 \n\t"
"lw %[tmp1], 6*4(%[ptr]) \n\t"
"lw %[temp_reg1], 24*4(%[csa]) \n\t"
"lw %[temp_reg2], 26*4(%[csa]) \n\t"
"add %[tmp2], %[tmp0], %[tmp1] \n\t"
"lw %[temp_reg3], 27*4(%[csa]) \n\t"
"mult $ac0, %[tmp2], %[temp_reg1] \n\t"
"mult $ac1, %[tmp2], %[temp_reg1] \n\t"
"sll %[temp_reg4], %[temp_reg4], 2 \n\t"
"sll %[temp_reg5], %[temp_reg5], 2 \n\t"
"sw %[temp_reg4], -6*4(%[ptr]) \n\t"
"mtlo %[MAX_lo], $ac0 \n\t"
"msub $ac0, %[tmp1], %[temp_reg2] \n\t"
"mtlo $zero, $ac1 \n\t"
"madd $ac1, %[tmp0], %[temp_reg3] \n\t"
"sw %[temp_reg5], 5*4(%[ptr]) \n\t"
"lw %[tmp00], -8*4(%[ptr]) \n\t"
"mfhi %[temp_reg1], $ac0 \n\t"
"lw %[tmp11], 7*4(%[ptr]) \n\t"
"mfhi %[temp_reg2], $ac1 \n\t"
"lw %[temp_reg4], 28*4(%[csa]) \n\t"
"add %[tmp2], %[tmp00], %[tmp11] \n\t"
"lw %[temp_reg5], 30*4(%[csa]) \n\t"
"mult $ac2, %[tmp2], %[temp_reg4] \n\t"
"mult $ac3, %[tmp2], %[temp_reg4] \n\t"
"lw %[temp_reg6], 31*4(%[csa]) \n\t"
"sll %[temp_reg1], %[temp_reg1], 2 \n\t"
"sll %[temp_reg2], %[temp_reg2], 2 \n\t"
"mtlo %[MAX_lo], $ac2 \n\t"
"msub $ac2, %[tmp11], %[temp_reg5] \n\t"
"mtlo $zero, $ac3 \n\t"
"madd $ac3, %[tmp00], %[temp_reg6] \n\t"
"sw %[temp_reg1], -7*4(%[ptr]) \n\t"
"sw %[temp_reg2], 6*4(%[ptr]) \n\t"
"mfhi %[temp_reg4], $ac2 \n\t"
"mfhi %[temp_reg5], $ac3 \n\t"
"sll %[temp_reg4], %[temp_reg4], 2 \n\t"
"sll %[temp_reg5], %[temp_reg5], 2 \n\t"
"sw %[temp_reg4], -8*4(%[ptr]) \n\t"
"sw %[temp_reg5], 7*4(%[ptr]) \n\t"
: [tmp0] "=&r" (tmp0), [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2),
[tmp00] "=&r" (tmp00), [tmp11] "=&r" (tmp11),
[temp_reg1] "=&r" (temp_reg1), [temp_reg2] "=&r" (temp_reg2),
[temp_reg3] "=&r" (temp_reg3), [temp_reg4] "=&r" (temp_reg4),
[temp_reg5] "=&r" (temp_reg5), [temp_reg6] "=&r" (temp_reg6)
: [csa] "r" (csa), [ptr] "r" (ptr),
[MAX_lo] "r" (MAX_lo)
);
/* Advance to the next 18-sample subband boundary. */
ptr += 18;
}
}
#define compute_antialias compute_antialias_mips_fixed
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_MIPS_COMPUTE_ANTIALIAS_FIXED_H */

View File

@@ -0,0 +1,184 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Bojan Zivkovic (bojan@mips.com)
*
* Compute antialias function optimised for MIPS floating-point architecture
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/mpegaudiodec.c
*/
#ifndef AVCODEC_MIPS_COMPUTE_ANTIALIAS_FLOAT_H
#define AVCODEC_MIPS_COMPUTE_ANTIALIAS_FLOAT_H
#if HAVE_INLINE_ASM
/**
 * Floating-point MP3 antialias butterflies, MIPS FPU optimized.
 * For each 18-sample subband boundary in g->sb_hybrid, applies 8 rotation
 * butterflies using pairs from csa_table (see generic code in
 * libavcodec/mpegaudiodec.c, per the @file note).
 *
 * The whole pass is a single asm loop: ptr advances by 72 bytes
 * (18 floats) per iteration until it reaches ptr_end.
 */
static void compute_antialias_mips_float(MPADecodeContext *s,
GranuleDef *g)
{
float *ptr, *ptr_end;
float *csa = &csa_table[0][0];
int n;
/* temporary variables */
float in1, in2, in3, in4, in5, in6, in7, in8;
float out1, out2, out3, out4;
ptr = g->sb_hybrid + 18;
/* we antialias only "long" bands */
if (g->block_type == 2) {
if (!g->switch_point)
return;
/* XXX: check this for 8000Hz case */
n = 1;
ptr_end = ptr + 18;
} else {
n = 31;
ptr_end = ptr + 558;
}
/**
* instructions are scheduled to minimize pipeline stall.
*/
__asm__ volatile (
"compute_antialias_float_loop%=: \t\n"
"lwc1 %[in1], -1*4(%[ptr]) \t\n"
"lwc1 %[in2], 0(%[csa]) \t\n"
"lwc1 %[in3], 1*4(%[csa]) \t\n"
"lwc1 %[in4], 0(%[ptr]) \t\n"
"lwc1 %[in5], -2*4(%[ptr]) \t\n"
"lwc1 %[in6], 4*4(%[csa]) \t\n"
"mul.s %[out1], %[in1], %[in2] \t\n"
"mul.s %[out2], %[in1], %[in3] \t\n"
"lwc1 %[in7], 5*4(%[csa]) \t\n"
"lwc1 %[in8], 1*4(%[ptr]) \t\n"
"nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
"madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
"mul.s %[out3], %[in5], %[in6] \t\n"
"mul.s %[out4], %[in5], %[in7] \t\n"
"lwc1 %[in1], -3*4(%[ptr]) \t\n"
"swc1 %[out1], -1*4(%[ptr]) \t\n"
"swc1 %[out2], 0(%[ptr]) \t\n"
"nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
"madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
"lwc1 %[in2], 8*4(%[csa]) \t\n"
"swc1 %[out3], -2*4(%[ptr]) \t\n"
"swc1 %[out4], 1*4(%[ptr]) \t\n"
"lwc1 %[in3], 9*4(%[csa]) \t\n"
"lwc1 %[in4], 2*4(%[ptr]) \t\n"
"mul.s %[out1], %[in1], %[in2] \t\n"
"lwc1 %[in5], -4*4(%[ptr]) \t\n"
"lwc1 %[in6], 12*4(%[csa]) \t\n"
"mul.s %[out2], %[in1], %[in3] \t\n"
"lwc1 %[in7], 13*4(%[csa]) \t\n"
"nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
"lwc1 %[in8], 3*4(%[ptr]) \t\n"
"mul.s %[out3], %[in5], %[in6] \t\n"
"madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
"mul.s %[out4], %[in5], %[in7] \t\n"
"swc1 %[out1], -3*4(%[ptr]) \t\n"
"lwc1 %[in1], -5*4(%[ptr]) \t\n"
"nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
"swc1 %[out2], 2*4(%[ptr]) \t\n"
"madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
"lwc1 %[in2], 16*4(%[csa]) \t\n"
"lwc1 %[in3], 17*4(%[csa]) \t\n"
"swc1 %[out3], -4*4(%[ptr]) \t\n"
"lwc1 %[in4], 4*4(%[ptr]) \t\n"
"swc1 %[out4], 3*4(%[ptr]) \t\n"
"mul.s %[out1], %[in1], %[in2] \t\n"
"mul.s %[out2], %[in1], %[in3] \t\n"
"lwc1 %[in5], -6*4(%[ptr]) \t\n"
"lwc1 %[in6], 20*4(%[csa]) \t\n"
"lwc1 %[in7], 21*4(%[csa]) \t\n"
"nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
"madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
"lwc1 %[in8], 5*4(%[ptr]) \t\n"
"mul.s %[out3], %[in5], %[in6] \t\n"
"mul.s %[out4], %[in5], %[in7] \t\n"
"swc1 %[out1], -5*4(%[ptr]) \t\n"
"swc1 %[out2], 4*4(%[ptr]) \t\n"
"lwc1 %[in1], -7*4(%[ptr]) \t\n"
"nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
"madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
"lwc1 %[in2], 24*4(%[csa]) \t\n"
"lwc1 %[in3], 25*4(%[csa]) \t\n"
"lwc1 %[in4], 6*4(%[ptr]) \t\n"
"swc1 %[out3], -6*4(%[ptr]) \t\n"
"swc1 %[out4], 5*4(%[ptr]) \t\n"
"mul.s %[out1], %[in1], %[in2] \t\n"
"lwc1 %[in5], -8*4(%[ptr]) \t\n"
"mul.s %[out2], %[in1], %[in3] \t\n"
"lwc1 %[in6], 28*4(%[csa]) \t\n"
"lwc1 %[in7], 29*4(%[csa]) \t\n"
"nmsub.s %[out1], %[out1], %[in3], %[in4] \t\n"
"lwc1 %[in8], 7*4(%[ptr]) \t\n"
"madd.s %[out2], %[out2], %[in2], %[in4] \t\n"
"mul.s %[out3], %[in5], %[in6] \t\n"
"mul.s %[out4], %[in5], %[in7] \t\n"
"swc1 %[out1], -7*4(%[ptr]) \t\n"
"swc1 %[out2], 6*4(%[ptr]) \t\n"
"addiu %[ptr], %[ptr], 72 \t\n"
"nmsub.s %[out3], %[out3], %[in7], %[in8] \t\n"
"madd.s %[out4], %[out4], %[in6], %[in8] \t\n"
"swc1 %[out3], -26*4(%[ptr]) \t\n"
"swc1 %[out4], -11*4(%[ptr]) \t\n"
"bne %[ptr], %[ptr_end], compute_antialias_float_loop%= \t\n"
: [ptr] "+r" (ptr),
[in1] "=&f" (in1), [in2] "=&f" (in2),
[in3] "=&f" (in3), [in4] "=&f" (in4),
[in5] "=&f" (in5), [in6] "=&f" (in6),
[in7] "=&f" (in7), [in8] "=&f" (in8),
[out1] "=&f" (out1), [out2] "=&f" (out2),
[out3] "=&f" (out3), [out4] "=&f" (out4)
: [csa] "r" (csa), [ptr_end] "r" (ptr_end)
);
}
#define compute_antialias compute_antialias_mips_float
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_MIPS_COMPUTE_ANTIALIAS_FLOAT_H */

View File

@@ -0,0 +1,168 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Zoran Lukic (zoranl@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavcodec/dsputil.h"
#if HAVE_INLINE_ASM
/**
 * Overlap-and-window two float vectors (MIPS FPU inline-assembly version
 * of DSPContext.vector_fmul_window).
 *
 * For i in [-len, 0) and j = -i - 1 (indices relative to the pointers
 * after they have been advanced by len below):
 *   dst[i] = src0[i] * win[j] - src1[j] * win[i]
 *   dst[j] = src0[i] * win[i] + src1[j] * win[j]
 *
 * The loop is unrolled 8x: each iteration issues two asm blocks, each
 * computing four sample pairs with mul.s / madd.s / msub.s.
 *
 * NOTE(review): assumes len is a multiple of 8 — confirm against callers.
 */
static void vector_fmul_window_mips(float *dst, const float *src0,
                                    const float *src1, const float *win, int len)
{
    int i, j;
    /*
     * variables used in inline assembler
     */
    float * dst_i, * dst_j, * dst_i2, * dst_j2;
    float temp, temp1, temp2, temp3, temp4, temp5, temp6, temp7;

    /* work outward from the middle, like the generic C implementation */
    dst += len;
    win += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i += 8, j -= 8) {
        dst_i = dst + i;
        dst_j = dst + j;
        /* the second asm block handles the next four pairs */
        dst_i2 = dst + i + 4;
        dst_j2 = dst + j - 4;
        /* pairs (i+0..i+3, j-0..j-3) */
        __asm__ volatile (
            "mul.s %[temp], %[s1], %[wi] \n\t"
            "mul.s %[temp1], %[s1], %[wj] \n\t"
            "mul.s %[temp2], %[s11], %[wi1] \n\t"
            "mul.s %[temp3], %[s11], %[wj1] \n\t"
            "msub.s %[temp], %[temp], %[s0], %[wj] \n\t"
            "madd.s %[temp1], %[temp1], %[s0], %[wi] \n\t"
            "msub.s %[temp2], %[temp2], %[s01], %[wj1] \n\t"
            "madd.s %[temp3], %[temp3], %[s01], %[wi1] \n\t"
            "swc1 %[temp], 0(%[dst_i]) \n\t" /* dst[i] = s0*wj - s1*wi; */
            "swc1 %[temp1], 0(%[dst_j]) \n\t" /* dst[j] = s0*wi + s1*wj; */
            "swc1 %[temp2], 4(%[dst_i]) \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
            "swc1 %[temp3], -4(%[dst_j]) \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
            "mul.s %[temp4], %[s12], %[wi2] \n\t"
            "mul.s %[temp5], %[s12], %[wj2] \n\t"
            "mul.s %[temp6], %[s13], %[wi3] \n\t"
            "mul.s %[temp7], %[s13], %[wj3] \n\t"
            "msub.s %[temp4], %[temp4], %[s02], %[wj2] \n\t"
            "madd.s %[temp5], %[temp5], %[s02], %[wi2] \n\t"
            "msub.s %[temp6], %[temp6], %[s03], %[wj3] \n\t"
            "madd.s %[temp7], %[temp7], %[s03], %[wi3] \n\t"
            "swc1 %[temp4], 8(%[dst_i]) \n\t" /* dst[i+2] = s02*wj2 - s12*wi2; */
            "swc1 %[temp5], -8(%[dst_j]) \n\t" /* dst[j-2] = s02*wi2 + s12*wj2; */
            "swc1 %[temp6], 12(%[dst_i]) \n\t" /* dst[i+3] = s03*wj3 - s13*wi3; */
            "swc1 %[temp7], -12(%[dst_j]) \n\t" /* dst[j-3] = s03*wi3 + s13*wj3; */
            : [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
            : [dst_j]"r"(dst_j), [dst_i]"r" (dst_i),
              [s0] "f"(src0[i]), [wj] "f"(win[j]), [s1] "f"(src1[j]),
              [wi] "f"(win[i]), [s01]"f"(src0[i + 1]),[wj1]"f"(win[j - 1]),
              [s11]"f"(src1[j - 1]), [wi1]"f"(win[i + 1]), [s02]"f"(src0[i + 2]),
              [wj2]"f"(win[j - 2]), [s12]"f"(src1[j - 2]),[wi2]"f"(win[i + 2]),
              [s03]"f"(src0[i + 3]), [wj3]"f"(win[j - 3]), [s13]"f"(src1[j - 3]),
              [wi3]"f"(win[i + 3])
            : "memory"
        );
        /* pairs (i+4..i+7, j-4..j-7), same butterfly with shifted operands */
        __asm__ volatile (
            "mul.s %[temp], %[s1], %[wi] \n\t"
            "mul.s %[temp1], %[s1], %[wj] \n\t"
            "mul.s %[temp2], %[s11], %[wi1] \n\t"
            "mul.s %[temp3], %[s11], %[wj1] \n\t"
            "msub.s %[temp], %[temp], %[s0], %[wj] \n\t"
            "madd.s %[temp1], %[temp1], %[s0], %[wi] \n\t"
            "msub.s %[temp2], %[temp2], %[s01], %[wj1] \n\t"
            "madd.s %[temp3], %[temp3], %[s01], %[wi1] \n\t"
            "swc1 %[temp], 0(%[dst_i2]) \n\t" /* dst[i+4] = s0*wj - s1*wi; */
            "swc1 %[temp1], 0(%[dst_j2]) \n\t" /* dst[j-4] = s0*wi + s1*wj; */
            "swc1 %[temp2], 4(%[dst_i2]) \n\t" /* dst[i+5] = s01*wj1 - s11*wi1; */
            "swc1 %[temp3], -4(%[dst_j2]) \n\t" /* dst[j-5] = s01*wi1 + s11*wj1; */
            "mul.s %[temp4], %[s12], %[wi2] \n\t"
            "mul.s %[temp5], %[s12], %[wj2] \n\t"
            "mul.s %[temp6], %[s13], %[wi3] \n\t"
            "mul.s %[temp7], %[s13], %[wj3] \n\t"
            "msub.s %[temp4], %[temp4], %[s02], %[wj2] \n\t"
            "madd.s %[temp5], %[temp5], %[s02], %[wi2] \n\t"
            "msub.s %[temp6], %[temp6], %[s03], %[wj3] \n\t"
            "madd.s %[temp7], %[temp7], %[s03], %[wi3] \n\t"
            "swc1 %[temp4], 8(%[dst_i2]) \n\t" /* dst[i+6] = s02*wj2 - s12*wi2; */
            "swc1 %[temp5], -8(%[dst_j2]) \n\t" /* dst[j-6] = s02*wi2 + s12*wj2; */
            "swc1 %[temp6], 12(%[dst_i2]) \n\t" /* dst[i+7] = s03*wj3 - s13*wi3; */
            "swc1 %[temp7], -12(%[dst_j2]) \n\t" /* dst[j-7] = s03*wi3 + s13*wj3; */
            : [temp]"=&f"(temp),
              [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
              [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
              [temp7] "=&f" (temp7)
            : [dst_j2]"r"(dst_j2), [dst_i2]"r"(dst_i2),
              [s0] "f"(src0[i + 4]), [wj] "f"(win[j - 4]), [s1] "f"(src1[j - 4]),
              [wi] "f"(win[i + 4]), [s01]"f"(src0[i + 5]),[wj1]"f"(win[j - 5]),
              [s11]"f"(src1[j - 5]), [wi1]"f"(win[i + 5]), [s02]"f"(src0[i + 6]),
              [wj2]"f"(win[j - 6]), [s12]"f"(src1[j - 6]),[wi2]"f"(win[i + 6]),
              [s03]"f"(src0[i + 7]), [wj3]"f"(win[j - 7]), [s13]"f"(src1[j - 7]),
              [wi3]"f"(win[i + 7])
            : "memory"
        );
    }
}
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized DSP routines into the DSPContext.
 *
 * Only vector_fmul_window is overridden here; all other entries keep the
 * generic implementations.  avctx is unused but kept for signature
 * symmetry with the other per-architecture init functions.
 */
av_cold void ff_dsputil_init_mips(DSPContext *c, AVCodecContext *avctx)
{
#if HAVE_INLINE_ASM
    c->vector_fmul_window = vector_fmul_window_mips;
#endif
}

View File

@@ -0,0 +1,67 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Stanislav Ocovaj (socovaj@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* definitions and initialization of LUT table for MIPS FFT
*/
#include "fft_table.h"
uint16_t fft_offsets_lut[0x2aab];
/**
 * Recursively build the FFT input-reordering lookup table.
 *
 * Decomposes a length-"size" transform starting at offset "off" in
 * split-radix fashion: one half-size sub-transform followed by two
 * quarter-size ones.  Each leaf (size < 16) appends off / 4 to the
 * table and advances *index, so the table lists the 4-point block
 * offsets in the order the split-radix kernel visits them.
 *
 * @param table output LUT, entries are offsets divided by 4
 * @param off   offset of this sub-transform within the full input
 * @param size  length of this sub-transform (power of two)
 * @param index in/out: next free slot in table
 */
void ff_fft_lut_init(uint16_t *table, int off, int size, int *index)
{
    if (size < 16) {
        /* leaf: record the offset of this 4-point block */
        table[(*index)++] = off >> 2;
        return;
    }
    ff_fft_lut_init(table, off,                   size >> 1, index);
    ff_fft_lut_init(table, off + (size >> 1),     size >> 2, index);
    ff_fft_lut_init(table, off + 3 * (size >> 2), size >> 2, index);
}

View File

@@ -0,0 +1,530 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Stanislav Ocovaj (socovaj@mips.com)
* Author: Zoran Lukic (zoranl@mips.com)
*
* Optimized MDCT/IMDCT and FFT transforms
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavcodec/fft.h"
#include "fft_table.h"
/**
* FFT transform
*/
#if HAVE_INLINE_ASM
/**
 * In-place split-radix FFT over z[0 .. 2^s->nbits - 1]
 * (MIPS FPU inline-assembly version of FFTContext.fft_calc).
 *
 * Three stages, iterating bottom-up instead of recursing:
 *   1. radix-4 butterflies on every 4-point block, block offsets taken
 *      from fft_offsets_lut (built by ff_fft_lut_init()),
 *   2. a fully hand-scheduled 8-point pass,
 *   3. general combine passes for n4 = 4, 8, ... up to fft_size/4,
 *      with twiddle factors read from the ff_cos_65536 table.
 */
static void ff_fft_calc_mips(FFTContext *s, FFTComplex *z)
{
    int nbits, i, n, num_transforms, offset, step;
    int n4, n2, n34;
    FFTSample tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
    FFTComplex *tmpz;
    float w_re, w_im;
    float *w_re_ptr, *w_im_ptr;
    const int fft_size = (1 << s->nbits);
    int s_n = s->nbits;
    int tem1, tem2;
    float pom, pom1, pom2, pom3;
    float temp, temp1, temp3, temp4;
    FFTComplex * tmpz_n2, * tmpz_n34, * tmpz_n4;
    FFTComplex * tmpz_n2_i, * tmpz_n34_i, * tmpz_n4_i, * tmpz_i;
    /**
     *num_transforms = (0x2aab >> (16 - s->nbits)) | 1;
     */
    __asm__ volatile (
        "li %[tem1], 16 \n\t"
        "sub %[s_n], %[tem1], %[s_n] \n\t"
        "li %[tem2], 10923 \n\t"
        "srav %[tem2], %[tem2], %[s_n] \n\t"
        "ori %[num_t],%[tem2], 1 \n\t"
        : [num_t]"=r"(num_transforms), [s_n]"+r"(s_n),
          [tem1]"=&r"(tem1), [tem2]"=&r"(tem2)
    );
    /* stage 1: radix-4 butterfly on each 4-point block (plain C) */
    for (n=0; n<num_transforms; n++) {
        offset = fft_offsets_lut[n] << 2;
        tmpz = z + offset;
        tmp1 = tmpz[0].re + tmpz[1].re;
        tmp5 = tmpz[2].re + tmpz[3].re;
        tmp2 = tmpz[0].im + tmpz[1].im;
        tmp6 = tmpz[2].im + tmpz[3].im;
        tmp3 = tmpz[0].re - tmpz[1].re;
        tmp8 = tmpz[2].im - tmpz[3].im;
        tmp4 = tmpz[0].im - tmpz[1].im;
        tmp7 = tmpz[2].re - tmpz[3].re;
        tmpz[0].re = tmp1 + tmp5;
        tmpz[2].re = tmp1 - tmp5;
        tmpz[0].im = tmp2 + tmp6;
        tmpz[2].im = tmp2 - tmp6;
        tmpz[1].re = tmp3 + tmp8;
        tmpz[3].re = tmp3 - tmp8;
        tmpz[1].im = tmp4 - tmp7;
        tmpz[3].im = tmp4 + tmp7;
    }
    if (fft_size < 8)
        return;
    num_transforms = (num_transforms >> 1) | 1;
    /* stage 2: hand-scheduled 8-point pass (loads/stores interleaved
     * with arithmetic to hide FPU latency) */
    for (n=0; n<num_transforms; n++) {
        offset = fft_offsets_lut[n] << 3;
        tmpz = z + offset;
        __asm__ volatile (
            "lwc1 %[tmp1], 32(%[tmpz]) \n\t"
            "lwc1 %[pom], 40(%[tmpz]) \n\t"
            "lwc1 %[tmp3], 48(%[tmpz]) \n\t"
            "lwc1 %[pom1], 56(%[tmpz]) \n\t"
            "lwc1 %[tmp2], 36(%[tmpz]) \n\t"
            "lwc1 %[pom2], 44(%[tmpz]) \n\t"
            "lwc1 %[pom3], 60(%[tmpz]) \n\t"
            "lwc1 %[tmp4], 52(%[tmpz]) \n\t"
            "add.s %[tmp1], %[tmp1], %[pom] \n\t" // tmp1 = tmpz[4].re + tmpz[5].re;
            "add.s %[tmp3], %[tmp3], %[pom1] \n\t" // tmp3 = tmpz[6].re + tmpz[7].re;
            "add.s %[tmp2], %[tmp2], %[pom2] \n\t" // tmp2 = tmpz[4].im + tmpz[5].im;
            "lwc1 %[pom], 40(%[tmpz]) \n\t"
            "add.s %[tmp4], %[tmp4], %[pom3] \n\t" // tmp4 = tmpz[6].im + tmpz[7].im;
            "add.s %[tmp5], %[tmp1], %[tmp3] \n\t" // tmp5 = tmp1 + tmp3;
            "sub.s %[tmp7], %[tmp1], %[tmp3] \n\t" // tmp7 = tmp1 - tmp3;
            "lwc1 %[tmp1], 32(%[tmpz]) \n\t"
            "lwc1 %[pom1], 44(%[tmpz]) \n\t"
            "add.s %[tmp6], %[tmp2], %[tmp4] \n\t" // tmp6 = tmp2 + tmp4;
            "sub.s %[tmp8], %[tmp2], %[tmp4] \n\t" // tmp8 = tmp2 - tmp4;
            "lwc1 %[tmp2], 36(%[tmpz]) \n\t"
            "lwc1 %[pom2], 56(%[tmpz]) \n\t"
            "lwc1 %[pom3], 60(%[tmpz]) \n\t"
            "lwc1 %[tmp3], 48(%[tmpz]) \n\t"
            "lwc1 %[tmp4], 52(%[tmpz]) \n\t"
            "sub.s %[tmp1], %[tmp1], %[pom] \n\t" // tmp1 = tmpz[4].re - tmpz[5].re;
            "lwc1 %[pom], 0(%[tmpz]) \n\t"
            "sub.s %[tmp2], %[tmp2], %[pom1] \n\t" // tmp2 = tmpz[4].im - tmpz[5].im;
            "sub.s %[tmp3], %[tmp3], %[pom2] \n\t" // tmp3 = tmpz[6].re - tmpz[7].re;
            "lwc1 %[pom2], 4(%[tmpz]) \n\t"
            "sub.s %[pom1], %[pom], %[tmp5] \n\t"
            "sub.s %[tmp4], %[tmp4], %[pom3] \n\t" // tmp4 = tmpz[6].im - tmpz[7].im;
            "add.s %[pom3], %[pom], %[tmp5] \n\t"
            "sub.s %[pom], %[pom2], %[tmp6] \n\t"
            "add.s %[pom2], %[pom2], %[tmp6] \n\t"
            "swc1 %[pom1], 32(%[tmpz]) \n\t" // tmpz[4].re = tmpz[0].re - tmp5;
            "swc1 %[pom3], 0(%[tmpz]) \n\t" // tmpz[0].re = tmpz[0].re + tmp5;
            "swc1 %[pom], 36(%[tmpz]) \n\t" // tmpz[4].im = tmpz[0].im - tmp6;
            "swc1 %[pom2], 4(%[tmpz]) \n\t" // tmpz[0].im = tmpz[0].im + tmp6;
            "lwc1 %[pom1], 16(%[tmpz]) \n\t"
            "lwc1 %[pom3], 20(%[tmpz]) \n\t"
            "li.s %[pom], 0.7071067812 \n\t" // float pom = 0.7071067812f; (sqrt(1/2))
            "add.s %[temp1],%[tmp1], %[tmp2] \n\t"
            "sub.s %[temp], %[pom1], %[tmp8] \n\t"
            "add.s %[pom2], %[pom3], %[tmp7] \n\t"
            "sub.s %[temp3],%[tmp3], %[tmp4] \n\t"
            "sub.s %[temp4],%[tmp2], %[tmp1] \n\t"
            "swc1 %[temp], 48(%[tmpz]) \n\t" // tmpz[6].re = tmpz[2].re - tmp8;
            "swc1 %[pom2], 52(%[tmpz]) \n\t" // tmpz[6].im = tmpz[2].im + tmp7;
            "add.s %[pom1], %[pom1], %[tmp8] \n\t"
            "sub.s %[pom3], %[pom3], %[tmp7] \n\t"
            "add.s %[tmp3], %[tmp3], %[tmp4] \n\t"
            "mul.s %[tmp5], %[pom], %[temp1] \n\t" // tmp5 = pom * (tmp1 + tmp2);
            "mul.s %[tmp7], %[pom], %[temp3] \n\t" // tmp7 = pom * (tmp3 - tmp4);
            "mul.s %[tmp6], %[pom], %[temp4] \n\t" // tmp6 = pom * (tmp2 - tmp1);
            "mul.s %[tmp8], %[pom], %[tmp3] \n\t" // tmp8 = pom * (tmp3 + tmp4);
            "swc1 %[pom1], 16(%[tmpz]) \n\t" // tmpz[2].re = tmpz[2].re + tmp8;
            "swc1 %[pom3], 20(%[tmpz]) \n\t" // tmpz[2].im = tmpz[2].im - tmp7;
            "add.s %[tmp1], %[tmp5], %[tmp7] \n\t" // tmp1 = tmp5 + tmp7;
            "sub.s %[tmp3], %[tmp5], %[tmp7] \n\t" // tmp3 = tmp5 - tmp7;
            "add.s %[tmp2], %[tmp6], %[tmp8] \n\t" // tmp2 = tmp6 + tmp8;
            "sub.s %[tmp4], %[tmp6], %[tmp8] \n\t" // tmp4 = tmp6 - tmp8;
            "lwc1 %[temp], 8(%[tmpz]) \n\t"
            "lwc1 %[temp1],12(%[tmpz]) \n\t"
            "lwc1 %[pom], 24(%[tmpz]) \n\t"
            "lwc1 %[pom2], 28(%[tmpz]) \n\t"
            "sub.s %[temp4],%[temp], %[tmp1] \n\t"
            "sub.s %[temp3],%[temp1], %[tmp2] \n\t"
            "add.s %[temp], %[temp], %[tmp1] \n\t"
            "add.s %[temp1],%[temp1], %[tmp2] \n\t"
            "sub.s %[pom1], %[pom], %[tmp4] \n\t"
            "add.s %[pom3], %[pom2], %[tmp3] \n\t"
            "add.s %[pom], %[pom], %[tmp4] \n\t"
            "sub.s %[pom2], %[pom2], %[tmp3] \n\t"
            "swc1 %[temp4],40(%[tmpz]) \n\t" // tmpz[5].re = tmpz[1].re - tmp1;
            "swc1 %[temp3],44(%[tmpz]) \n\t" // tmpz[5].im = tmpz[1].im - tmp2;
            "swc1 %[temp], 8(%[tmpz]) \n\t" // tmpz[1].re = tmpz[1].re + tmp1;
            "swc1 %[temp1],12(%[tmpz]) \n\t" // tmpz[1].im = tmpz[1].im + tmp2;
            "swc1 %[pom1], 56(%[tmpz]) \n\t" // tmpz[7].re = tmpz[3].re - tmp4;
            "swc1 %[pom3], 60(%[tmpz]) \n\t" // tmpz[7].im = tmpz[3].im + tmp3;
            "swc1 %[pom], 24(%[tmpz]) \n\t" // tmpz[3].re = tmpz[3].re + tmp4;
            "swc1 %[pom2], 28(%[tmpz]) \n\t" // tmpz[3].im = tmpz[3].im - tmp3;
            : [tmp1]"=&f"(tmp1), [pom]"=&f"(pom), [pom1]"=&f"(pom1), [pom2]"=&f"(pom2),
              [tmp3]"=&f"(tmp3), [tmp2]"=&f"(tmp2), [tmp4]"=&f"(tmp4), [tmp5]"=&f"(tmp5), [tmp7]"=&f"(tmp7),
              [tmp6]"=&f"(tmp6), [tmp8]"=&f"(tmp8), [pom3]"=&f"(pom3),[temp]"=&f"(temp), [temp1]"=&f"(temp1),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4)
            : [tmpz]"r"(tmpz)
            : "memory"
        );
    }
    /* stage 3: general combine passes.  step indexes ff_cos_65536 as if
     * it covered MAX_FFT_SIZE entries; it halves as n4 doubles. */
    step = 1 << (MAX_LOG2_NFFT - 4);
    n4 = 4;
    for (nbits=4; nbits<=s->nbits; nbits++) {
        /*
         * num_transforms = (num_transforms >> 1) | 1;
         */
        __asm__ volatile (
            "sra %[num_t], %[num_t], 1 \n\t"
            "ori %[num_t], %[num_t], 1 \n\t"
            : [num_t] "+r" (num_transforms)
        );
        n2 = 2 * n4;
        n34 = 3 * n4;
        for (n=0; n<num_transforms; n++) {
            offset = fft_offsets_lut[n] << nbits;
            tmpz = z + offset;
            tmpz_n2 = tmpz + n2;
            tmpz_n4 = tmpz + n4;
            tmpz_n34 = tmpz + n34;
            /* i == 0 butterfly: twiddle is (1, 0), no multiplies needed */
            __asm__ volatile (
                "lwc1 %[pom1], 0(%[tmpz_n2]) \n\t"
                "lwc1 %[pom], 0(%[tmpz_n34]) \n\t"
                "lwc1 %[pom2], 4(%[tmpz_n2]) \n\t"
                "lwc1 %[pom3], 4(%[tmpz_n34]) \n\t"
                "lwc1 %[temp1],0(%[tmpz]) \n\t"
                "lwc1 %[temp3],4(%[tmpz]) \n\t"
                "add.s %[tmp5], %[pom1], %[pom] \n\t" // tmp5 = tmpz[ n2].re + tmpz[n34].re;
                "sub.s %[tmp1], %[pom1], %[pom] \n\t" // tmp1 = tmpz[ n2].re - tmpz[n34].re;
                "add.s %[tmp6], %[pom2], %[pom3] \n\t" // tmp6 = tmpz[ n2].im + tmpz[n34].im;
                "sub.s %[tmp2], %[pom2], %[pom3] \n\t" // tmp2 = tmpz[ n2].im - tmpz[n34].im;
                "sub.s %[temp], %[temp1], %[tmp5] \n\t"
                "add.s %[temp1],%[temp1], %[tmp5] \n\t"
                "sub.s %[temp4],%[temp3], %[tmp6] \n\t"
                "add.s %[temp3],%[temp3], %[tmp6] \n\t"
                "swc1 %[temp], 0(%[tmpz_n2]) \n\t" // tmpz[ n2].re = tmpz[ 0].re - tmp5;
                "swc1 %[temp1],0(%[tmpz]) \n\t" // tmpz[ 0].re = tmpz[ 0].re + tmp5;
                "lwc1 %[pom1], 0(%[tmpz_n4]) \n\t"
                "swc1 %[temp4],4(%[tmpz_n2]) \n\t" // tmpz[ n2].im = tmpz[ 0].im - tmp6;
                "lwc1 %[temp], 4(%[tmpz_n4]) \n\t"
                "swc1 %[temp3],4(%[tmpz]) \n\t" // tmpz[ 0].im = tmpz[ 0].im + tmp6;
                "sub.s %[pom], %[pom1], %[tmp2] \n\t"
                "add.s %[pom1], %[pom1], %[tmp2] \n\t"
                "add.s %[temp1],%[temp], %[tmp1] \n\t"
                "sub.s %[temp], %[temp], %[tmp1] \n\t"
                "swc1 %[pom], 0(%[tmpz_n34]) \n\t" // tmpz[n34].re = tmpz[n4].re - tmp2;
                "swc1 %[pom1], 0(%[tmpz_n4]) \n\t" // tmpz[ n4].re = tmpz[n4].re + tmp2;
                "swc1 %[temp1],4(%[tmpz_n34]) \n\t" // tmpz[n34].im = tmpz[n4].im + tmp1;
                "swc1 %[temp], 4(%[tmpz_n4]) \n\t" // tmpz[ n4].im = tmpz[n4].im - tmp1;
                : [tmp5]"=&f"(tmp5),
                  [tmp1]"=&f"(tmp1), [pom]"=&f"(pom), [pom1]"=&f"(pom1), [pom2]"=&f"(pom2),
                  [tmp2]"=&f"(tmp2), [tmp6]"=&f"(tmp6), [pom3]"=&f"(pom3),
                  [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp3]"=&f"(temp3), [temp4]"=&f"(temp4)
                : [tmpz]"r"(tmpz), [tmpz_n2]"r"(tmpz_n2), [tmpz_n34]"r"(tmpz_n34), [tmpz_n4]"r"(tmpz_n4)
                : "memory"
            );
            /* cos table walked forward, sin (cos mirrored) walked backward */
            w_re_ptr = (float*)(ff_cos_65536 + step);
            w_im_ptr = (float*)(ff_cos_65536 + MAX_FFT_SIZE/4 - step);
            for (i=1; i<n4; i++) {
                w_re = w_re_ptr[0];
                w_im = w_im_ptr[0];
                tmpz_n2_i = tmpz_n2 + i;
                tmpz_n4_i = tmpz_n4 + i;
                tmpz_n34_i= tmpz_n34 + i;
                tmpz_i = tmpz + i;
                __asm__ volatile (
                    "lwc1 %[temp], 0(%[tmpz_n2_i]) \n\t"
                    "lwc1 %[temp1], 4(%[tmpz_n2_i]) \n\t"
                    "lwc1 %[pom], 0(%[tmpz_n34_i]) \n\t"
                    "lwc1 %[pom1], 4(%[tmpz_n34_i]) \n\t"
                    "mul.s %[temp3], %[w_im], %[temp] \n\t"
                    "mul.s %[temp4], %[w_im], %[temp1] \n\t"
                    "mul.s %[pom2], %[w_im], %[pom1] \n\t"
                    "mul.s %[pom3], %[w_im], %[pom] \n\t"
                    "msub.s %[tmp2], %[temp3], %[w_re], %[temp1] \n\t" // tmp2 = w_re * tmpz[ n2+i].im - w_im * tmpz[ n2+i].re;
                    "madd.s %[tmp1], %[temp4], %[w_re], %[temp] \n\t" // tmp1 = w_re * tmpz[ n2+i].re + w_im * tmpz[ n2+i].im;
                    "msub.s %[tmp3], %[pom2], %[w_re], %[pom] \n\t" // tmp3 = w_re * tmpz[n34+i].re - w_im * tmpz[n34+i].im;
                    "madd.s %[tmp4], %[pom3], %[w_re], %[pom1] \n\t" // tmp4 = w_re * tmpz[n34+i].im + w_im * tmpz[n34+i].re;
                    "lwc1 %[temp], 0(%[tmpz_i]) \n\t"
                    "lwc1 %[pom], 4(%[tmpz_i]) \n\t"
                    "add.s %[tmp5], %[tmp1], %[tmp3] \n\t" // tmp5 = tmp1 + tmp3;
                    "sub.s %[tmp1], %[tmp1], %[tmp3] \n\t" // tmp1 = tmp1 - tmp3;
                    "add.s %[tmp6], %[tmp2], %[tmp4] \n\t" // tmp6 = tmp2 + tmp4;
                    "sub.s %[tmp2], %[tmp2], %[tmp4] \n\t" // tmp2 = tmp2 - tmp4;
                    "sub.s %[temp1], %[temp], %[tmp5] \n\t"
                    "add.s %[temp], %[temp], %[tmp5] \n\t"
                    "sub.s %[pom1], %[pom], %[tmp6] \n\t"
                    "add.s %[pom], %[pom], %[tmp6] \n\t"
                    "lwc1 %[temp3], 0(%[tmpz_n4_i]) \n\t"
                    "lwc1 %[pom2], 4(%[tmpz_n4_i]) \n\t"
                    "swc1 %[temp1], 0(%[tmpz_n2_i]) \n\t" // tmpz[ n2+i].re = tmpz[ i].re - tmp5;
                    "swc1 %[temp], 0(%[tmpz_i]) \n\t" // tmpz[ i].re = tmpz[ i].re + tmp5;
                    "swc1 %[pom1], 4(%[tmpz_n2_i]) \n\t" // tmpz[ n2+i].im = tmpz[ i].im - tmp6;
                    "swc1 %[pom] , 4(%[tmpz_i]) \n\t" // tmpz[ i].im = tmpz[ i].im + tmp6;
                    "sub.s %[temp4], %[temp3], %[tmp2] \n\t"
                    "add.s %[pom3], %[pom2], %[tmp1] \n\t"
                    "add.s %[temp3], %[temp3], %[tmp2] \n\t"
                    "sub.s %[pom2], %[pom2], %[tmp1] \n\t"
                    "swc1 %[temp4], 0(%[tmpz_n34_i]) \n\t" // tmpz[n34+i].re = tmpz[n4+i].re - tmp2;
                    "swc1 %[pom3], 4(%[tmpz_n34_i]) \n\t" // tmpz[n34+i].im = tmpz[n4+i].im + tmp1;
                    "swc1 %[temp3], 0(%[tmpz_n4_i]) \n\t" // tmpz[ n4+i].re = tmpz[n4+i].re + tmp2;
                    "swc1 %[pom2], 4(%[tmpz_n4_i]) \n\t" // tmpz[ n4+i].im = tmpz[n4+i].im - tmp1;
                    : [tmp1]"=&f"(tmp1), [tmp2]"=&f" (tmp2), [temp]"=&f"(temp), [tmp3]"=&f"(tmp3),
                      [tmp4]"=&f"(tmp4), [tmp5]"=&f"(tmp5), [tmp6]"=&f"(tmp6),
                      [temp1]"=&f"(temp1), [temp3]"=&f"(temp3), [temp4]"=&f"(temp4),
                      [pom]"=&f"(pom), [pom1]"=&f"(pom1), [pom2]"=&f"(pom2), [pom3]"=&f"(pom3)
                    : [w_re]"f"(w_re), [w_im]"f"(w_im),
                      [tmpz_i]"r"(tmpz_i),[tmpz_n2_i]"r"(tmpz_n2_i),
                      [tmpz_n34_i]"r"(tmpz_n34_i), [tmpz_n4_i]"r"(tmpz_n4_i)
                    : "memory"
                );
                w_re_ptr += step;
                w_im_ptr -= step;
            }
        }
        step >>= 1;
        n4 <<= 1;
    }
}
/**
* MDCT/IMDCT transforms.
*/
/**
 * Half inverse MDCT (MIPS FPU inline-assembly version of
 * FFTContext.imdct_half).
 *
 * Standard three-phase IMDCT: pre-rotation of the n/2 inputs by
 * tcos/tsin with bit-reversed placement, an n/4-point complex FFT,
 * then post-rotation with symmetric reordering.  Produces n/2 output
 * samples in "output" (aliased as FFTComplex pairs).
 *
 * Both loops are 2x unrolled; the asm blocks only compute the rotated
 * values, the stores back into z happen in C using the values returned
 * through the output operands.
 */
static void ff_imdct_half_mips(FFTContext *s, FFTSample *output, const FFTSample *input)
{
    int k, n8, n4, n2, n, j;
    const uint16_t *revtab = s->revtab;
    const FFTSample *tcos = s->tcos;
    const FFTSample *tsin = s->tsin;
    const FFTSample *in1, *in2, *in3, *in4;
    FFTComplex *z = (FFTComplex *)output;
    int j1;
    const float *tcos1, *tsin1, *tcos2, *tsin2;
    float temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8,
        temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16;
    FFTComplex *z1, *z2;
    n = 1 << s->mdct_bits;
    n2 = n >> 1;
    n4 = n >> 2;
    n8 = n >> 3;
    /* pre rotation: in1/in3 walk even inputs forward, in2/in4 walk odd
     * inputs backward; each iteration produces two complex values placed
     * at bit-reversed indices j and j1 */
    in1 = input;
    in2 = input + n2 - 1;
    in3 = input + 2;
    in4 = input + n2 - 3;
    tcos1 = tcos;
    tsin1 = tsin;
    /* n4 = 64 or 128 */
    for(k = 0; k < n4; k += 2) {
        j = revtab[k ];
        j1 = revtab[k + 1];
        __asm__ volatile (
            "lwc1 %[temp1], 0(%[in2]) \t\n"
            "lwc1 %[temp2], 0(%[tcos1]) \t\n"
            "lwc1 %[temp3], 0(%[tsin1]) \t\n"
            "lwc1 %[temp4], 0(%[in1]) \t\n"
            "lwc1 %[temp5], 0(%[in4]) \t\n"
            "mul.s %[temp9], %[temp1], %[temp2] \t\n"
            "mul.s %[temp10], %[temp1], %[temp3] \t\n"
            "lwc1 %[temp6], 4(%[tcos1]) \t\n"
            "lwc1 %[temp7], 4(%[tsin1]) \t\n"
            "nmsub.s %[temp9], %[temp9], %[temp4], %[temp3] \t\n"
            "madd.s %[temp10], %[temp10], %[temp4], %[temp2] \t\n"
            "mul.s %[temp11], %[temp5], %[temp6] \t\n"
            "mul.s %[temp12], %[temp5], %[temp7] \t\n"
            "lwc1 %[temp8], 0(%[in3]) \t\n"
            "addiu %[tcos1], %[tcos1], 8 \t\n"
            "addiu %[tsin1], %[tsin1], 8 \t\n"
            "addiu %[in1], %[in1], 16 \t\n"
            "nmsub.s %[temp11], %[temp11], %[temp8], %[temp7] \t\n"
            "madd.s %[temp12], %[temp12], %[temp8], %[temp6] \t\n"
            "addiu %[in2], %[in2], -16 \t\n"
            "addiu %[in3], %[in3], 16 \t\n"
            "addiu %[in4], %[in4], -16 \t\n"
            : [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4),
              [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
              [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
              [temp9]"=&f"(temp9), [temp10]"=&f"(temp10),
              [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
              [tsin1]"+r"(tsin1), [tcos1]"+r"(tcos1),
              [in1]"+r"(in1), [in2]"+r"(in2),
              [in3]"+r"(in3), [in4]"+r"(in4)
        );
        /* z[j] = in2*tcos - in1*tsin + i*(in2*tsin + in1*tcos), etc. */
        z[j ].re = temp9;
        z[j ].im = temp10;
        z[j1].re = temp11;
        z[j1].im = temp12;
    }
    s->fft_calc(s, z);
    /* post rotation + reordering: z1 walks down from the middle, z2
     * walks up; each iteration rotates two pairs and swaps re/im
     * between the mirrored positions */
    /* n8 = 32 or 64 */
    for(k = 0; k < n8; k += 2) {
        tcos1 = &tcos[n8 - k - 2];
        tsin1 = &tsin[n8 - k - 2];
        tcos2 = &tcos[n8 + k];
        tsin2 = &tsin[n8 + k];
        z1 = &z[n8 - k - 2];
        z2 = &z[n8 + k ];
        __asm__ volatile (
            "lwc1 %[temp1], 12(%[z1]) \t\n"
            "lwc1 %[temp2], 4(%[tsin1]) \t\n"
            "lwc1 %[temp3], 4(%[tcos1]) \t\n"
            "lwc1 %[temp4], 8(%[z1]) \t\n"
            "lwc1 %[temp5], 4(%[z1]) \t\n"
            "mul.s %[temp9], %[temp1], %[temp2] \t\n"
            "mul.s %[temp10], %[temp1], %[temp3] \t\n"
            "lwc1 %[temp6], 0(%[tsin1]) \t\n"
            "lwc1 %[temp7], 0(%[tcos1]) \t\n"
            "nmsub.s %[temp9], %[temp9], %[temp4], %[temp3] \t\n"
            "madd.s %[temp10], %[temp10], %[temp4], %[temp2] \t\n"
            "mul.s %[temp11], %[temp5], %[temp6] \t\n"
            "mul.s %[temp12], %[temp5], %[temp7] \t\n"
            "lwc1 %[temp8], 0(%[z1]) \t\n"
            "lwc1 %[temp1], 4(%[z2]) \t\n"
            "lwc1 %[temp2], 0(%[tsin2]) \t\n"
            "lwc1 %[temp3], 0(%[tcos2]) \t\n"
            "nmsub.s %[temp11], %[temp11], %[temp8], %[temp7] \t\n"
            "madd.s %[temp12], %[temp12], %[temp8], %[temp6] \t\n"
            "mul.s %[temp13], %[temp1], %[temp2] \t\n"
            "mul.s %[temp14], %[temp1], %[temp3] \t\n"
            "lwc1 %[temp4], 0(%[z2]) \t\n"
            "lwc1 %[temp5], 12(%[z2]) \t\n"
            "lwc1 %[temp6], 4(%[tsin2]) \t\n"
            "lwc1 %[temp7], 4(%[tcos2]) \t\n"
            "nmsub.s %[temp13], %[temp13], %[temp4], %[temp3] \t\n"
            "madd.s %[temp14], %[temp14], %[temp4], %[temp2] \t\n"
            "mul.s %[temp15], %[temp5], %[temp6] \t\n"
            "mul.s %[temp16], %[temp5], %[temp7] \t\n"
            "lwc1 %[temp8], 8(%[z2]) \t\n"
            "nmsub.s %[temp15], %[temp15], %[temp8], %[temp7] \t\n"
            "madd.s %[temp16], %[temp16], %[temp8], %[temp6] \t\n"
            : [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4),
              [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
              [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
              [temp9]"=&f"(temp9), [temp10]"=&f"(temp10),
              [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
              [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
              [temp15]"=&f"(temp15), [temp16]"=&f"(temp16)
            : [z1]"r"(z1), [z2]"r"(z2),
              [tsin1]"r"(tsin1), [tcos1]"r"(tcos1),
              [tsin2]"r"(tsin2), [tcos2]"r"(tcos2)
        );
        /* write back with the cross-exchange of re/im between the
         * mirrored halves (imag parts swap positions) */
        z1[1].re = temp9;
        z1[1].im = temp14;
        z2[0].re = temp13;
        z2[0].im = temp10;
        z1[0].re = temp11;
        z1[0].im = temp16;
        z2[1].re = temp15;
        z2[1].im = temp12;
    }
}
/**
* Compute inverse MDCT of size N = 2^nbits
* @param output N samples
* @param input N/2 samples
*/
/**
 * Compute the full inverse MDCT of size N = 2^s->mdct_bits.
 *
 * Expands the N/2 samples produced by ff_imdct_half_mips() (written into
 * the middle of the buffer) to N samples using the IMDCT symmetry: the
 * first quarter is the negated reverse of the second quarter, and the
 * last quarter mirrors the third.
 *
 * @param output N samples
 * @param input  N/2 samples
 */
static void ff_imdct_calc_mips(FFTContext *s, FFTSample *output, const FFTSample *input)
{
    const int n  = 1 << s->mdct_bits;
    const int n2 = n >> 1;
    const int n4 = n >> 2;
    int k;

    ff_imdct_half_mips(s, output + n4, input);

    /* mirror into the outer quarters, 4x unrolled */
    for (k = 0; k < n4; k += 4) {
        output[k]     = -output[n2 - k - 1];
        output[k + 1] = -output[n2 - k - 2];
        output[k + 2] = -output[n2 - k - 3];
        output[k + 3] = -output[n2 - k - 4];

        output[n - k - 1] = output[n2 + k];
        output[n - k - 2] = output[n2 + k + 1];
        output[n - k - 3] = output[n2 + k + 2];
        output[n - k - 4] = output[n2 + k + 3];
    }
}
#endif /* HAVE_INLINE_ASM */
/**
 * Per-architecture FFT init: build the reordering LUT and install the
 * MIPS FPU-optimized transform entry points into the FFTContext.
 */
av_cold void ff_fft_init_mips(FFTContext *s)
{
    int idx = 0;

    /* 1 << 16 is the largest transform size the LUT covers
     * (same value as MAX_FFT_SIZE in fft_table.h) */
    ff_fft_lut_init(fft_offsets_lut, 0, 1 << 16, &idx);

#if HAVE_INLINE_ASM
    s->fft_calc   = ff_fft_calc_mips;
#if CONFIG_MDCT
    s->imdct_calc = ff_imdct_calc_mips;
    s->imdct_half = ff_imdct_half_mips;
#endif
#endif
}

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Stanislav Ocovaj (socovaj@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* definitions and LUT table for MIPS FFT
*/
#ifndef AVCODEC_MIPS_FFT_TABLE_H
#define AVCODEC_MIPS_FFT_TABLE_H
#include "libavcodec/fft.h"
#define MAX_LOG2_NFFT 16 //!< Specifies maximum allowed FFT size
#define MAX_FFT_SIZE (1 << MAX_LOG2_NFFT)
extern uint16_t fft_offsets_lut[];
void ff_fft_lut_init(uint16_t *table, int off, int size, int *index);
#endif /* AVCODEC_MIPS_FFT_TABLE_H */

View File

@@ -0,0 +1,342 @@
/*
* Format Conversion Utils for MIPS
*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Zoran Lukic (zoranl@mips.com)
* Author: Nedeljko Babic (nbabic@mips.com)
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/fmtconvert.h"
#if HAVE_INLINE_ASM
#if HAVE_MIPSDSPR1
/**
 * Convert 'len' floats to saturated int16 samples.
 *
 * Each float is converted with cvt.w.s (current FPU rounding mode); the
 * DSP-ASE saturating shift shll_s.w ..., 16 then clamps the 32-bit result
 * to the signed 16-bit range in the upper halfword, and srl ..., 16 moves
 * it back down before the sh store.
 *
 * NOTE(review): the loop body is unrolled 8x and exits only when
 * src == src_end, so len is assumed to be a multiple of 8 -- TODO confirm
 * against callers.  len == 0 is handled by the initial beq.
 */
static void float_to_int16_mips(int16_t *dst, const float *src, long len)
{
const float *src_end = src + len;
int ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7;
float src0, src1, src2, src3, src4, src5, src6, src7;
/*
* loop is 8 times unrolled in assembler in order to achieve better performance
*/
__asm__ volatile(
"beq %[len], $zero, fti16_end%= \n\t"
"fti16_lp%=: \n\t"
"lwc1 %[src0], 0(%[src]) \n\t"
"lwc1 %[src1], 4(%[src]) \n\t"
"lwc1 %[src2], 8(%[src]) \n\t"
"lwc1 %[src3], 12(%[src]) \n\t"
"cvt.w.s %[src0], %[src0] \n\t"
"cvt.w.s %[src1], %[src1] \n\t"
"cvt.w.s %[src2], %[src2] \n\t"
"cvt.w.s %[src3], %[src3] \n\t"
"mfc1 %[ret0], %[src0] \n\t"
"mfc1 %[ret1], %[src1] \n\t"
"mfc1 %[ret2], %[src2] \n\t"
"mfc1 %[ret3], %[src3] \n\t"
"lwc1 %[src4], 16(%[src]) \n\t"
"lwc1 %[src5], 20(%[src]) \n\t"
"lwc1 %[src6], 24(%[src]) \n\t"
"lwc1 %[src7], 28(%[src]) \n\t"
"cvt.w.s %[src4], %[src4] \n\t"
"cvt.w.s %[src5], %[src5] \n\t"
"cvt.w.s %[src6], %[src6] \n\t"
"cvt.w.s %[src7], %[src7] \n\t"
"addiu %[src], 32 \n\t"
"shll_s.w %[ret0], %[ret0], 16 \n\t"
"shll_s.w %[ret1], %[ret1], 16 \n\t"
"shll_s.w %[ret2], %[ret2], 16 \n\t"
"shll_s.w %[ret3], %[ret3], 16 \n\t"
"srl %[ret0], %[ret0], 16 \n\t"
"srl %[ret1], %[ret1], 16 \n\t"
"srl %[ret2], %[ret2], 16 \n\t"
"srl %[ret3], %[ret3], 16 \n\t"
"sh %[ret0], 0(%[dst]) \n\t"
"sh %[ret1], 2(%[dst]) \n\t"
"sh %[ret2], 4(%[dst]) \n\t"
"sh %[ret3], 6(%[dst]) \n\t"
"mfc1 %[ret4], %[src4] \n\t"
"mfc1 %[ret5], %[src5] \n\t"
"mfc1 %[ret6], %[src6] \n\t"
"mfc1 %[ret7], %[src7] \n\t"
"shll_s.w %[ret4], %[ret4], 16 \n\t"
"shll_s.w %[ret5], %[ret5], 16 \n\t"
"shll_s.w %[ret6], %[ret6], 16 \n\t"
"shll_s.w %[ret7], %[ret7], 16 \n\t"
"srl %[ret4], %[ret4], 16 \n\t"
"srl %[ret5], %[ret5], 16 \n\t"
"srl %[ret6], %[ret6], 16 \n\t"
"srl %[ret7], %[ret7], 16 \n\t"
"sh %[ret4], 8(%[dst]) \n\t"
"sh %[ret5], 10(%[dst]) \n\t"
"sh %[ret6], 12(%[dst]) \n\t"
"sh %[ret7], 14(%[dst]) \n\t"
"addiu %[dst], 16 \n\t"
"bne %[src], %[src_end], fti16_lp%= \n\t"
"fti16_end%=: \n\t"
: [ret0]"=&r"(ret0), [ret1]"=&r"(ret1), [ret2]"=&r"(ret2), [ret3]"=&r"(ret3),
[ret4]"=&r"(ret4), [ret5]"=&r"(ret5), [ret6]"=&r"(ret6), [ret7]"=&r"(ret7),
[src0]"=&f"(src0), [src1]"=&f"(src1), [src2]"=&f"(src2), [src3]"=&f"(src3),
[src4]"=&f"(src4), [src5]"=&f"(src5), [src6]"=&f"(src6), [src7]"=&f"(src7),
[src]"+r"(src), [dst]"+r"(dst)
: [src_end]"r"(src_end), [len]"r"(len)
: "memory"
);
}
/**
 * Convert planar float audio to interleaved, saturated int16.
 *
 * channels == 2 takes a fast path that interleaves one sample per channel
 * per iteration.  The generic path converts each channel separately, 8
 * samples per iteration, stepping the destination pointer by
 * ch2 = 2*channels bytes (one interleaved frame) between samples.
 * Saturation works as in float_to_int16_mips (shll_s.w/srl pair).
 *
 * NOTE(review): the stereo path converts through hard-coded FPU registers
 * $f9/$f10, which are not named in the asm clobber list -- verify this is
 * safe for the register allocation/ABI in use.
 * NOTE(review): both loops test at the bottom (do-while shape), so len is
 * assumed > 0 and, in the generic path, a multiple of 8 -- TODO confirm
 * against callers.
 */
static void float_to_int16_interleave_mips(int16_t *dst, const float **src, long len,
int channels)
{
int c, ch2 = channels <<1;
int ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7;
float src0, src1, src2, src3, src4, src5, src6, src7;
int16_t *dst_ptr0, *dst_ptr1, *dst_ptr2, *dst_ptr3;
int16_t *dst_ptr4, *dst_ptr5, *dst_ptr6, *dst_ptr7;
const float *src_ptr, *src_ptr2, *src_end;
if (channels == 2) {
src_ptr = &src[0][0];
src_ptr2 = &src[1][0];
src_end = src_ptr + len;
__asm__ volatile (
"fti16i2_lp%=: \n\t"
"lwc1 %[src0], 0(%[src_ptr]) \n\t"
"lwc1 %[src1], 0(%[src_ptr2]) \n\t"
"addiu %[src_ptr], 4 \n\t"
"cvt.w.s $f9, %[src0] \n\t"
"cvt.w.s $f10, %[src1] \n\t"
"mfc1 %[ret0], $f9 \n\t"
"mfc1 %[ret1], $f10 \n\t"
"shll_s.w %[ret0], %[ret0], 16 \n\t"
"shll_s.w %[ret1], %[ret1], 16 \n\t"
"addiu %[src_ptr2], 4 \n\t"
"srl %[ret0], %[ret0], 16 \n\t"
"srl %[ret1], %[ret1], 16 \n\t"
"sh %[ret0], 0(%[dst]) \n\t"
"sh %[ret1], 2(%[dst]) \n\t"
"addiu %[dst], 4 \n\t"
"bne %[src_ptr], %[src_end], fti16i2_lp%= \n\t"
: [ret0]"=&r"(ret0), [ret1]"=&r"(ret1),
[src0]"=&f"(src0), [src1]"=&f"(src1),
[src_ptr]"+r"(src_ptr), [src_ptr2]"+r"(src_ptr2),
[dst]"+r"(dst)
: [src_end]"r"(src_end)
: "memory"
);
} else {
/* Generic path: one channel at a time, writing every channels-th
 * int16 slot of the interleaved output. */
for (c = 0; c < channels; c++) {
src_ptr = &src[c][0];
dst_ptr0 = &dst[c];
src_end = src_ptr + len;
/*
* loop is 8 times unrolled in assembler in order to achieve better performance
*/
__asm__ volatile(
"fti16i_lp%=: \n\t"
"lwc1 %[src0], 0(%[src_ptr]) \n\t"
"lwc1 %[src1], 4(%[src_ptr]) \n\t"
"lwc1 %[src2], 8(%[src_ptr]) \n\t"
"lwc1 %[src3], 12(%[src_ptr]) \n\t"
"cvt.w.s %[src0], %[src0] \n\t"
"cvt.w.s %[src1], %[src1] \n\t"
"cvt.w.s %[src2], %[src2] \n\t"
"cvt.w.s %[src3], %[src3] \n\t"
"mfc1 %[ret0], %[src0] \n\t"
"mfc1 %[ret1], %[src1] \n\t"
"mfc1 %[ret2], %[src2] \n\t"
"mfc1 %[ret3], %[src3] \n\t"
"lwc1 %[src4], 16(%[src_ptr]) \n\t"
"lwc1 %[src5], 20(%[src_ptr]) \n\t"
"lwc1 %[src6], 24(%[src_ptr]) \n\t"
"lwc1 %[src7], 28(%[src_ptr]) \n\t"
"addu %[dst_ptr1], %[dst_ptr0], %[ch2] \n\t"
"addu %[dst_ptr2], %[dst_ptr1], %[ch2] \n\t"
"addu %[dst_ptr3], %[dst_ptr2], %[ch2] \n\t"
"addu %[dst_ptr4], %[dst_ptr3], %[ch2] \n\t"
"addu %[dst_ptr5], %[dst_ptr4], %[ch2] \n\t"
"addu %[dst_ptr6], %[dst_ptr5], %[ch2] \n\t"
"addu %[dst_ptr7], %[dst_ptr6], %[ch2] \n\t"
"addiu %[src_ptr], 32 \n\t"
"cvt.w.s %[src4], %[src4] \n\t"
"cvt.w.s %[src5], %[src5] \n\t"
"cvt.w.s %[src6], %[src6] \n\t"
"cvt.w.s %[src7], %[src7] \n\t"
"shll_s.w %[ret0], %[ret0], 16 \n\t"
"shll_s.w %[ret1], %[ret1], 16 \n\t"
"shll_s.w %[ret2], %[ret2], 16 \n\t"
"shll_s.w %[ret3], %[ret3], 16 \n\t"
"srl %[ret0], %[ret0], 16 \n\t"
"srl %[ret1], %[ret1], 16 \n\t"
"srl %[ret2], %[ret2], 16 \n\t"
"srl %[ret3], %[ret3], 16 \n\t"
"sh %[ret0], 0(%[dst_ptr0]) \n\t"
"sh %[ret1], 0(%[dst_ptr1]) \n\t"
"sh %[ret2], 0(%[dst_ptr2]) \n\t"
"sh %[ret3], 0(%[dst_ptr3]) \n\t"
"mfc1 %[ret4], %[src4] \n\t"
"mfc1 %[ret5], %[src5] \n\t"
"mfc1 %[ret6], %[src6] \n\t"
"mfc1 %[ret7], %[src7] \n\t"
"shll_s.w %[ret4], %[ret4], 16 \n\t"
"shll_s.w %[ret5], %[ret5], 16 \n\t"
"shll_s.w %[ret6], %[ret6], 16 \n\t"
"shll_s.w %[ret7], %[ret7], 16 \n\t"
"srl %[ret4], %[ret4], 16 \n\t"
"srl %[ret5], %[ret5], 16 \n\t"
"srl %[ret6], %[ret6], 16 \n\t"
"srl %[ret7], %[ret7], 16 \n\t"
"sh %[ret4], 0(%[dst_ptr4]) \n\t"
"sh %[ret5], 0(%[dst_ptr5]) \n\t"
"sh %[ret6], 0(%[dst_ptr6]) \n\t"
"sh %[ret7], 0(%[dst_ptr7]) \n\t"
"addu %[dst_ptr0], %[dst_ptr7], %[ch2] \n\t"
"bne %[src_ptr], %[src_end], fti16i_lp%= \n\t"
: [ret0]"=&r"(ret0), [ret1]"=&r"(ret1), [ret2]"=&r"(ret2), [ret3]"=&r"(ret3),
[ret4]"=&r"(ret4), [ret5]"=&r"(ret5), [ret6]"=&r"(ret6), [ret7]"=&r"(ret7),
[src0]"=&f"(src0), [src1]"=&f"(src1), [src2]"=&f"(src2), [src3]"=&f"(src3),
[src4]"=&f"(src4), [src5]"=&f"(src5), [src6]"=&f"(src6), [src7]"=&f"(src7),
[dst_ptr1]"=&r"(dst_ptr1), [dst_ptr2]"=&r"(dst_ptr2), [dst_ptr3]"=&r"(dst_ptr3),
[dst_ptr4]"=&r"(dst_ptr4), [dst_ptr5]"=&r"(dst_ptr5), [dst_ptr6]"=&r"(dst_ptr6),
[dst_ptr7]"=&r"(dst_ptr7), [dst_ptr0]"+r"(dst_ptr0), [src_ptr]"+r"(src_ptr)
: [ch2]"r"(ch2), [src_end]"r"(src_end)
: "memory"
);
}
}
}
#endif /* HAVE_MIPSDSPR1 */
/**
 * dst[i] = src[i] * mul for i in [0, len), converting int32 to float.
 *
 * Integers are moved to the FPU with mtc1, converted with cvt.s.w,
 * scaled with mul.s and stored with swc1; the loop is unrolled 8x.
 *
 * NOTE(review): unlike float_to_int16_mips there is no len == 0 guard;
 * the loop tests only at the bottom, so len is assumed > 0 and a
 * multiple of 8 -- TODO confirm against callers.
 */
static void int32_to_float_fmul_scalar_mips(float *dst, const int *src,
float mul, int len)
{
/*
* variables used in inline assembler
*/
float temp1, temp3, temp5, temp7, temp9, temp11, temp13, temp15;
int rpom1, rpom2, rpom11, rpom21, rpom12, rpom22, rpom13, rpom23;
const int *src_end = src + len;
/*
* loop is 8 times unrolled in assembler in order to achieve better performance
*/
__asm__ volatile (
"i32tf_lp%=: \n\t"
"lw %[rpom11], 0(%[src]) \n\t"
"lw %[rpom21], 4(%[src]) \n\t"
"lw %[rpom1], 8(%[src]) \n\t"
"lw %[rpom2], 12(%[src]) \n\t"
"mtc1 %[rpom11], %[temp1] \n\t"
"mtc1 %[rpom21], %[temp3] \n\t"
"mtc1 %[rpom1], %[temp5] \n\t"
"mtc1 %[rpom2], %[temp7] \n\t"
"lw %[rpom13], 16(%[src]) \n\t"
"lw %[rpom23], 20(%[src]) \n\t"
"lw %[rpom12], 24(%[src]) \n\t"
"lw %[rpom22], 28(%[src]) \n\t"
"mtc1 %[rpom13], %[temp9] \n\t"
"mtc1 %[rpom23], %[temp11] \n\t"
"mtc1 %[rpom12], %[temp13] \n\t"
"mtc1 %[rpom22], %[temp15] \n\t"
"addiu %[src], 32 \n\t"
"cvt.s.w %[temp1], %[temp1] \n\t"
"cvt.s.w %[temp3], %[temp3] \n\t"
"cvt.s.w %[temp5], %[temp5] \n\t"
"cvt.s.w %[temp7], %[temp7] \n\t"
"cvt.s.w %[temp9], %[temp9] \n\t"
"cvt.s.w %[temp11], %[temp11] \n\t"
"cvt.s.w %[temp13], %[temp13] \n\t"
"cvt.s.w %[temp15], %[temp15] \n\t"
"mul.s %[temp1], %[temp1], %[mul] \n\t"
"mul.s %[temp3], %[temp3], %[mul] \n\t"
"mul.s %[temp5], %[temp5], %[mul] \n\t"
"mul.s %[temp7], %[temp7], %[mul] \n\t"
"mul.s %[temp9], %[temp9], %[mul] \n\t"
"mul.s %[temp11], %[temp11], %[mul] \n\t"
"mul.s %[temp13], %[temp13], %[mul] \n\t"
"mul.s %[temp15], %[temp15], %[mul] \n\t"
"swc1 %[temp1], 0(%[dst]) \n\t" /*dst[i] = src[i] * mul; */
"swc1 %[temp3], 4(%[dst]) \n\t" /*dst[i+1] = src[i+1] * mul;*/
"swc1 %[temp5], 8(%[dst]) \n\t" /*dst[i+2] = src[i+2] * mul;*/
"swc1 %[temp7], 12(%[dst]) \n\t" /*dst[i+3] = src[i+3] * mul;*/
"swc1 %[temp9], 16(%[dst]) \n\t" /*dst[i+4] = src[i+4] * mul;*/
"swc1 %[temp11], 20(%[dst]) \n\t" /*dst[i+5] = src[i+5] * mul;*/
"swc1 %[temp13], 24(%[dst]) \n\t" /*dst[i+6] = src[i+6] * mul;*/
"swc1 %[temp15], 28(%[dst]) \n\t" /*dst[i+7] = src[i+7] * mul;*/
"addiu %[dst], 32 \n\t"
"bne %[src], %[src_end], i32tf_lp%= \n\t"
: [temp1]"=&f"(temp1), [temp11]"=&f"(temp11),
[temp13]"=&f"(temp13), [temp15]"=&f"(temp15),
[temp3]"=&f"(temp3), [temp5]"=&f"(temp5),
[temp7]"=&f"(temp7), [temp9]"=&f"(temp9),
[rpom1]"=&r"(rpom1), [rpom2]"=&r"(rpom2),
[rpom11]"=&r"(rpom11), [rpom21]"=&r"(rpom21),
[rpom12]"=&r"(rpom12), [rpom22]"=&r"(rpom22),
[rpom13]"=&r"(rpom13), [rpom23]"=&r"(rpom23),
[dst]"+r"(dst), [src]"+r"(src)
: [mul]"f"(mul), [src_end]"r"(src_end)
: "memory"
);
}
#endif /* HAVE_INLINE_ASM */
/**
 * Install the MIPS-optimized format-conversion routines into @c.
 * Only routines whose required CPU features were enabled at build
 * time are hooked up; the rest keep the generic C implementations.
 */
av_cold void ff_fmt_convert_init_mips(FmtConvertContext *c)
{
#if HAVE_INLINE_ASM
#if HAVE_MIPSDSPR1
    /* The int16 conversions rely on the DSPr1 saturating shift (shll_s.w). */
    c->float_to_int16            = float_to_int16_mips;
    c->float_to_int16_interleave = float_to_int16_interleave_mips;
#endif
    /* Plain FPU code, needs only inline-asm support. */
    c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_mips;
#endif
}

View File

@@ -0,0 +1,108 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nedeljko Babic (nbabic@mips.com)
*
* LSP routines for ACELP-based codecs optimized for MIPS
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/lsp.c
*/
#ifndef AVCODEC_LSP_MIPS_H
#define AVCODEC_LSP_MIPS_H
#if HAVE_MIPSFPU && HAVE_INLINE_ASM
/**
 * Compute one half of the LP polynomial from LSP coefficients
 * (MIPS FPU replacement for ff_lsp2polyf(), see libavcodec/lsp.c).
 *
 * Fills f[0..lp_half_order] with double coefficients; each outer
 * iteration applies the recurrence
 *     f[j] += -2*lsp[2i] * f[j-1] + f[j-2]
 * downwards over the array, carried out in assembly with madd.d.
 *
 * @param lsp           line spectral pairs (only even-indexed entries used)
 * @param f             [out] polynomial coefficients
 * @param lp_half_order half order of the LP filter (>= 1 assumed)
 */
static av_always_inline void ff_lsp2polyf_mips(const double *lsp, double *f, int lp_half_order)
{
    int i, j = 0;
    double * p_fi = f;   /* points at f[i-1] when iteration i starts */
    double * p_f = 0;    /* walks down the coefficient array in the asm */
    f[0] = 1.0;
    f[1] = -2 * lsp[0];
    lsp -= 2;
    for(i=2; i<=lp_half_order; i++)
    {
        double tmp, f_j_2, f_j_1, f_j;
        double val = lsp[2*i];
        __asm__ volatile(
            "move %[p_f], %[p_fi] \n\t"
            "add.d %[val], %[val], %[val] \n\t"
            "addiu %[p_fi], 8 \n\t"
            "ldc1 %[f_j_1], 0(%[p_f]) \n\t"
            "ldc1 %[f_j], 8(%[p_f]) \n\t"
            "neg.d %[val], %[val] \n\t"
            "add.d %[tmp], %[f_j_1], %[f_j_1] \n\t"
            "madd.d %[tmp], %[tmp], %[f_j], %[val] \n\t"
            "addiu %[j], %[i], -2 \n\t"
            "ldc1 %[f_j_2], -8(%[p_f]) \n\t"
            "sdc1 %[tmp], 16(%[p_f]) \n\t"
            "beqz %[j], ff_lsp2polyf_lp_j_end%= \n\t"
            "ff_lsp2polyf_lp_j%=: \n\t"
            "add.d %[tmp], %[f_j], %[f_j_2] \n\t"
            "madd.d %[tmp], %[tmp], %[f_j_1], %[val] \n\t"
            "mov.d %[f_j], %[f_j_1] \n\t"
            "addiu %[j], -1 \n\t"
            "mov.d %[f_j_1], %[f_j_2] \n\t"
            "ldc1 %[f_j_2], -16(%[p_f]) \n\t"
            "sdc1 %[tmp], 8(%[p_f]) \n\t"
            "addiu %[p_f], -8 \n\t"
            "bgtz %[j], ff_lsp2polyf_lp_j%= \n\t"
            "ff_lsp2polyf_lp_j_end%=: \n\t"
            : [f_j_2]"=&f"(f_j_2), [f_j_1]"=&f"(f_j_1), [val]"+f"(val),
              [tmp]"=&f"(tmp), [f_j]"=&f"(f_j), [p_f]"+r"(p_f),
              [j]"+r"(j), [p_fi]"+r"(p_fi)
            : [i]"r"(i)
            /* BUGFIX: the asm reads f[] with ldc1 and writes it with sdc1,
             * so it must declare a "memory" clobber; without it the
             * compiler may reorder or elide the C stores to f[0]/f[1]
             * (and the read-back of f[1] afterwards) around the asm. */
            : "memory"
        );
        f[1] += val;
    }
}
#define ff_lsp2polyf ff_lsp2polyf_mips
#endif /* HAVE_MIPSFPU && HAVE_INLINE_ASM */
#endif /* AVCODEC_LSP_MIPS_H */

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_MIPS_MATHOPS_H
#define AVCODEC_MIPS_MATHOPS_H
#include <stdint.h>
#include "config.h"
#include "libavutil/common.h"
#if HAVE_INLINE_ASM
#if HAVE_LOONGSON
/**
 * 64-bit multiply-accumulate: returns d + (int64_t)a * b.
 * Uses the Loongson dmult.g instruction, which writes the 64-bit
 * product directly to a GPR (m) instead of the hi/lo register pair.
 */
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
int64_t m;
__asm__ ("dmult.g %1, %2, %3 \n\t"
"daddu %0, %0, %1 \n\t"
: "+r"(d), "=&r"(m) : "r"(a), "r"(b));
return d;
}
/* Make the optimized function override the generic MAC64 macro. */
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
/**
 * 64-bit multiply-subtract: returns d - (int64_t)a * b.
 * Loongson variant; dmult.g puts the 64-bit product straight into a GPR.
 */
static inline av_const int64_t MLS64(int64_t d, int a, int b)
{
int64_t m;
__asm__ ("dmult.g %1, %2, %3 \n\t"
"dsubu %0, %0, %1 \n\t"
: "+r"(d), "=&r"(m) : "r"(a), "r"(b));
return d;
}
/* Make the optimized function override the generic MLS64 macro. */
#define MLS64(d, a, b) ((d) = MLS64(d, a, b))
#elif ARCH_MIPS64
/**
 * 64-bit multiply-accumulate: returns d + (int64_t)a * b.
 * Generic MIPS64 variant: dmult leaves the signed product in hi/lo,
 * only the low 64 bits (mflo) are needed here; hi/lo are clobbered.
 */
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
int64_t m;
__asm__ ("dmult %2, %3 \n\t"
"mflo %1 \n\t"
"daddu %0, %0, %1 \n\t"
: "+r"(d), "=&r"(m) : "r"(a), "r"(b)
: "hi", "lo");
return d;
}
/* Make the optimized function override the generic MAC64 macro. */
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
/**
 * 64-bit multiply-subtract: returns d - (int64_t)a * b.
 * Generic MIPS64 variant; see MAC64 above for the hi/lo usage.
 */
static inline av_const int64_t MLS64(int64_t d, int a, int b)
{
int64_t m;
__asm__ ("dmult %2, %3 \n\t"
"mflo %1 \n\t"
"dsubu %0, %0, %1 \n\t"
: "+r"(d), "=&r"(m) : "r"(a), "r"(b)
: "hi", "lo");
return d;
}
/* Make the optimized function override the generic MLS64 macro. */
#define MLS64(d, a, b) ((d) = MLS64(d, a, b))
#endif
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_MIPS_MATHOPS_H */

View File

@@ -0,0 +1,903 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Bojan Zivkovic (bojan@mips.com)
*
* MPEG Audio decoder optimized for MIPS fixed-point architecture
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Reference: libavcodec/mpegaudiodsp_template.c
*/
#include <string.h>
#include "libavcodec/mpegaudiodsp.h"
/**
 * Fixed-point MPEG audio synthesis window, MIPS DSP ASE version.
 *
 * Applies the 512-tap synthesis window to synth_buf and emits 32 int16
 * samples spaced 'incr' apart, carrying the 24-bit rounding residue
 * through *dither_state.  The round_sample() helper of the C reference
 * (libavcodec/mpegaudiodsp_template.c) is replaced inline by
 * extr.w/mflo plus slt/movn clamping to [-32768, 32767].
 *
 * Structure: one asm block for the first sample, a loop computing
 * samples j and 31-j together (the second sum accumulated in $ac1),
 * and a final asm block for sample 16.
 *
 * Requires the MIPS DSP ASE (extr.w, second accumulator $ac1).
 */
static void ff_mpadsp_apply_window_mips_fixed(int32_t *synth_buf, int32_t *window,
int *dither_state, int16_t *samples, int incr)
{
register const int32_t *w, *w2, *p;
int j;
int16_t *samples2;
int w_asm, p_asm, w_asm1, p_asm1, w_asm2, p_asm2;
int w2_asm, w2_asm1, *p_temp1, *p_temp2;
int sum1 = 0;
int const min_asm = -32768, max_asm = 32767;
int temp1, temp2 = 0, temp3 = 0;
int64_t sum;
/* copy to avoid wrap */
memcpy(synth_buf + 512, synth_buf, 32 * sizeof(*synth_buf));
samples2 = samples + 31 * incr;
w = window;
w2 = window + 31;
sum = *dither_state;
p = synth_buf + 16;
p_temp1 = synth_buf + 16;
p_temp2 = synth_buf + 48;
temp1 = sum;
/**
* use of round_sample function from the original code is eliminated,
* changed with appropriate assembly instructions.
*/
/* First output sample: seed lo with the dither state, then madd/msub
 * the 16 window/input products into $ac0. */
__asm__ volatile (
"mthi $zero \n\t"
"mtlo %[temp1] \n\t"
"lw %[w_asm], 0(%[w]) \n\t"
"lw %[p_asm], 0(%[p]) \n\t"
"lw %[w_asm1], 64*4(%[w]) \n\t"
"lw %[p_asm1], 64*4(%[p]) \n\t"
"lw %[w_asm2], 128*4(%[w]) \n\t"
"lw %[p_asm2], 128*4(%[p]) \n\t"
"madd %[w_asm], %[p_asm] \n\t"
"madd %[w_asm1], %[p_asm1] \n\t"
"madd %[w_asm2], %[p_asm2] \n\t"
"lw %[w_asm], 192*4(%[w]) \n\t"
"lw %[p_asm], 192*4(%[p]) \n\t"
"lw %[w_asm1], 256*4(%[w]) \n\t"
"lw %[p_asm1], 256*4(%[p]) \n\t"
"lw %[w_asm2], 320*4(%[w]) \n\t"
"lw %[p_asm2], 320*4(%[p]) \n\t"
"madd %[w_asm], %[p_asm] \n\t"
"madd %[w_asm1], %[p_asm1] \n\t"
"madd %[w_asm2], %[p_asm2] \n\t"
"lw %[w_asm], 384*4(%[w]) \n\t"
"lw %[p_asm], 384*4(%[p]) \n\t"
"lw %[w_asm1], 448*4(%[w]) \n\t"
"lw %[p_asm1], 448*4(%[p]) \n\t"
"lw %[w_asm2], 32*4(%[w]) \n\t"
"lw %[p_asm2], 32*4(%[p]) \n\t"
"madd %[w_asm], %[p_asm] \n\t"
"madd %[w_asm1], %[p_asm1] \n\t"
"msub %[w_asm2], %[p_asm2] \n\t"
"lw %[w_asm], 96*4(%[w]) \n\t"
"lw %[p_asm], 96*4(%[p]) \n\t"
"lw %[w_asm1], 160*4(%[w]) \n\t"
"lw %[p_asm1], 160*4(%[p]) \n\t"
"lw %[w_asm2], 224*4(%[w]) \n\t"
"lw %[p_asm2], 224*4(%[p]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub %[w_asm2], %[p_asm2] \n\t"
"lw %[w_asm], 288*4(%[w]) \n\t"
"lw %[p_asm], 288*4(%[p]) \n\t"
"lw %[w_asm1], 352*4(%[w]) \n\t"
"lw %[p_asm1], 352*4(%[p]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"lw %[w_asm], 480*4(%[w]) \n\t"
"lw %[p_asm], 480*4(%[p]) \n\t"
"lw %[w_asm2], 416*4(%[w]) \n\t"
"lw %[p_asm2], 416*4(%[p]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub %[w_asm2], %[p_asm2] \n\t"
/*round_sample function from the original code is eliminated,
* changed with appropriate assembly instructions
* code example:
"extr.w %[sum1],$ac0,24 \n\t"
"mflo %[temp3], $ac0 \n\t"
"and %[temp1], %[temp3], 0x00ffffff \n\t"
"slt %[temp2], %[sum1], %[min_asm] \n\t"
"movn %[sum1], %[min_asm],%[temp2] \n\t"
"slt %[temp2], %[max_asm],%[sum1] \n\t"
"movn %[sum1], %[max_asm],%[temp2] \n\t"
"sh %[sum1], 0(%[samples]) \n\t"
*/
"extr.w %[sum1], $ac0, 24 \n\t"
"mflo %[temp3] \n\t"
"addi %[w], %[w], 4 \n\t"
"and %[temp1], %[temp3], 0x00ffffff \n\t"
"slt %[temp2], %[sum1], %[min_asm] \n\t"
"movn %[sum1], %[min_asm], %[temp2] \n\t"
"slt %[temp2], %[max_asm], %[sum1] \n\t"
"movn %[sum1], %[max_asm], %[temp2] \n\t"
"sh %[sum1], 0(%[samples]) \n\t"
: [w_asm] "=&r" (w_asm), [p_asm] "=&r" (p_asm), [w_asm1] "=&r" (w_asm1),
[p_asm1] "=&r" (p_asm1), [temp1] "+r" (temp1), [temp2] "+r" (temp2),
[w_asm2] "=&r" (w_asm2), [p_asm2] "=&r" (p_asm2),
[sum1] "+r" (sum1), [w] "+r" (w), [temp3] "+r" (temp3)
: [p] "r" (p), [samples] "r" (samples), [min_asm] "r" (min_asm),
[max_asm] "r" (max_asm)
: "hi","lo"
);
samples += incr;
/* we calculate two samples at the same time to avoid one memory
access per two sample */
for(j = 1; j < 16; j++) {
__asm__ volatile (
"mthi $0, $ac1 \n\t"
"mtlo $0, $ac1 \n\t"
"mthi $0 \n\t"
"mtlo %[temp1] \n\t"
"addi %[p_temp1], %[p_temp1], 4 \n\t"
"lw %[w_asm], 0(%[w]) \n\t"
"lw %[p_asm], 0(%[p_temp1]) \n\t"
"lw %[w2_asm], 0(%[w2]) \n\t"
"lw %[w_asm1], 64*4(%[w]) \n\t"
"lw %[p_asm1], 64*4(%[p_temp1]) \n\t"
"lw %[w2_asm1], 64*4(%[w2]) \n\t"
"madd %[w_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"madd %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"lw %[w_asm], 128*4(%[w]) \n\t"
"lw %[p_asm], 128*4(%[p_temp1]) \n\t"
"lw %[w2_asm], 128*4(%[w2]) \n\t"
"lw %[w_asm1], 192*4(%[w]) \n\t"
"lw %[p_asm1], 192*4(%[p_temp1]) \n\t"
"lw %[w2_asm1], 192*4(%[w2]) \n\t"
"madd %[w_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"madd %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"lw %[w_asm], 256*4(%[w]) \n\t"
"lw %[p_asm], 256*4(%[p_temp1]) \n\t"
"lw %[w2_asm], 256*4(%[w2]) \n\t"
"lw %[w_asm1], 320*4(%[w]) \n\t"
"lw %[p_asm1], 320*4(%[p_temp1]) \n\t"
"lw %[w2_asm1], 320*4(%[w2]) \n\t"
"madd %[w_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"madd %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"lw %[w_asm], 384*4(%[w]) \n\t"
"lw %[p_asm], 384*4(%[p_temp1]) \n\t"
"lw %[w2_asm], 384*4(%[w2]) \n\t"
"lw %[w_asm1], 448*4(%[w]) \n\t"
"lw %[p_asm1], 448*4(%[p_temp1]) \n\t"
"lw %[w2_asm1], 448*4(%[w2]) \n\t"
"madd %[w_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"madd %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"addi %[p_temp2], %[p_temp2], -4 \n\t"
"lw %[w_asm], 32*4(%[w]) \n\t"
"lw %[p_asm], 0(%[p_temp2]) \n\t"
"lw %[w2_asm], 32*4(%[w2]) \n\t"
"lw %[w_asm1], 96*4(%[w]) \n\t"
"lw %[p_asm1], 64*4(%[p_temp2]) \n\t"
"lw %[w2_asm1], 96*4(%[w2]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"lw %[w_asm], 160*4(%[w]) \n\t"
"lw %[p_asm], 128*4(%[p_temp2]) \n\t"
"lw %[w2_asm], 160*4(%[w2]) \n\t"
"lw %[w_asm1], 224*4(%[w]) \n\t"
"lw %[p_asm1], 192*4(%[p_temp2]) \n\t"
"lw %[w2_asm1], 224*4(%[w2]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"lw %[w_asm], 288*4(%[w]) \n\t"
"lw %[p_asm], 256*4(%[p_temp2]) \n\t"
"lw %[w2_asm], 288*4(%[w2]) \n\t"
"lw %[w_asm1], 352*4(%[w]) \n\t"
"lw %[p_asm1], 320*4(%[p_temp2]) \n\t"
"lw %[w2_asm1], 352*4(%[w2]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"lw %[w_asm], 416*4(%[w]) \n\t"
"lw %[p_asm], 384*4(%[p_temp2]) \n\t"
"lw %[w2_asm], 416*4(%[w2]) \n\t"
"lw %[w_asm1], 480*4(%[w]) \n\t"
"lw %[p_asm1], 448*4(%[p_temp2]) \n\t"
"lw %[w2_asm1], 480*4(%[w2]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub $ac1, %[w2_asm], %[p_asm] \n\t"
"msub $ac1, %[w2_asm1], %[p_asm1] \n\t"
"addi %[w], %[w], 4 \n\t"
"addi %[w2], %[w2], -4 \n\t"
/* Clamp/store sample j from $ac0, feed its 24-bit residue into
 * $ac1, then clamp/store sample 31-j from $ac1. */
"mflo %[temp2] \n\t"
"extr.w %[sum1], $ac0, 24 \n\t"
"li %[temp3], 1 \n\t"
"and %[temp1], %[temp2], 0x00ffffff \n\t"
"madd $ac1, %[temp1], %[temp3] \n\t"
"slt %[temp2], %[sum1], %[min_asm] \n\t"
"movn %[sum1], %[min_asm], %[temp2] \n\t"
"slt %[temp2], %[max_asm], %[sum1] \n\t"
"movn %[sum1], %[max_asm], %[temp2] \n\t"
"sh %[sum1], 0(%[samples]) \n\t"
"mflo %[temp3], $ac1 \n\t"
"extr.w %[sum1], $ac1, 24 \n\t"
"and %[temp1], %[temp3], 0x00ffffff \n\t"
"slt %[temp2], %[sum1], %[min_asm] \n\t"
"movn %[sum1], %[min_asm], %[temp2] \n\t"
"slt %[temp2], %[max_asm], %[sum1] \n\t"
"movn %[sum1], %[max_asm], %[temp2] \n\t"
"sh %[sum1], 0(%[samples2]) \n\t"
: [w_asm] "=&r" (w_asm), [p_asm] "=&r" (p_asm), [w_asm1] "=&r" (w_asm1),
[p_asm1] "=&r" (p_asm1), [w2_asm1] "=&r" (w2_asm1),
[w2_asm] "=&r" (w2_asm), [temp1] "+r" (temp1), [temp2] "+r" (temp2),
[p_temp1] "+r" (p_temp1), [p_temp2] "+r" (p_temp2), [sum1] "+r" (sum1),
[w] "+r" (w), [w2] "+r" (w2), [samples] "+r" (samples),
[samples2] "+r" (samples2), [temp3] "+r" (temp3)
: [min_asm] "r" (min_asm), [max_asm] "r" (max_asm)
: "hi", "lo"
);
samples += incr;
samples2 -= incr;
}
/* Last sample (j == 16): only msub terms remain. */
p = synth_buf + 32;
__asm__ volatile (
"mthi $0 \n\t"
"mtlo %[temp1] \n\t"
"lw %[w_asm], 32*4(%[w]) \n\t"
"lw %[p_asm], 0(%[p]) \n\t"
"lw %[w_asm1], 96*4(%[w]) \n\t"
"lw %[p_asm1], 64*4(%[p]) \n\t"
"lw %[w_asm2], 160*4(%[w]) \n\t"
"lw %[p_asm2], 128*4(%[p]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub %[w_asm2], %[p_asm2] \n\t"
"lw %[w_asm], 224*4(%[w]) \n\t"
"lw %[p_asm], 192*4(%[p]) \n\t"
"lw %[w_asm1], 288*4(%[w]) \n\t"
"lw %[p_asm1], 256*4(%[p]) \n\t"
"lw %[w_asm2], 352*4(%[w]) \n\t"
"lw %[p_asm2], 320*4(%[p]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"msub %[w_asm2], %[p_asm2] \n\t"
"lw %[w_asm], 416*4(%[w]) \n\t"
"lw %[p_asm], 384*4(%[p]) \n\t"
"lw %[w_asm1], 480*4(%[w]) \n\t"
"lw %[p_asm1], 448*4(%[p]) \n\t"
"msub %[w_asm], %[p_asm] \n\t"
"msub %[w_asm1], %[p_asm1] \n\t"
"extr.w %[sum1], $ac0, 24 \n\t"
"mflo %[temp2] \n\t"
"and %[temp1], %[temp2], 0x00ffffff \n\t"
"slt %[temp2], %[sum1], %[min_asm] \n\t"
"movn %[sum1], %[min_asm], %[temp2] \n\t"
"slt %[temp2], %[max_asm], %[sum1] \n\t"
"movn %[sum1], %[max_asm], %[temp2] \n\t"
"sh %[sum1], 0(%[samples]) \n\t"
: [w_asm] "=&r" (w_asm), [p_asm] "=&r" (p_asm), [w_asm1] "=&r" (w_asm1),
[p_asm1] "=&r" (p_asm1), [temp1] "+r" (temp1), [temp2] "+r" (temp2),
[w_asm2] "=&r" (w_asm2), [p_asm2] "=&r" (p_asm2), [sum1] "+r" (sum1)
: [w] "r" (w), [p] "r" (p), [samples] "r" (samples), [min_asm] "r" (min_asm),
[max_asm] "r" (max_asm)
: "hi", "lo"
);
/* low 24 bits of the last accumulator become the new dither state */
*dither_state= temp1;
}
/**
 * 36-point inverse MDCT, fixed point, hand-scheduled MIPS assembly
 * (uses the MIPS DSP ASE: accumulators $ac1-$ac3, extr.w, movn, msub).
 *
 * Computes the inverse MDCT of the 18 input coefficients, applies the
 * synthesis window and overlap-adds with the previous frame's buffer.
 *
 * @param out output samples; the asm stores at byte offsets k*32*4, i.e.
 *            a stride of 32 ints (one column of the synthesis matrix)
 * @param buf overlap buffer for the next call; stored at offsets 4*k*4,
 *            i.e. a stride of 4 ints
 * @param in  18 input coefficients; NOTE: used as scratch - the first
 *            asm block overwrites in[] with its running partial sums
 * @param win synthesis window; read at int indices 0..37
 *
 * NOTE(review): the first asm block loads and stores through %[in] but
 * declares no "memory" clobber - presumably safe because the later asm
 * re-reads in[] through the same "r"-constrained pointer, but confirm
 * against current GCC extended-asm rules.
 */
static void imdct36_mips_fixed(int *out, int *buf, int *in, int *win)
{
    int j;
    int t0, t1, t2, t3, s0, s1, s2, s3;
    int tmp[18], *tmp1, *in1;
    /* temporary variables */
    int temp_reg1, temp_reg2, temp_reg3, temp_reg4, temp_reg5, temp_reg6;
    int t4, t5, t6, t8, t7;
    /* values defined in macros and tables are
     * eliminated - they are directly loaded in appropriate variables
     */
    /* NOTE(review): several of these initializers exceed INT_MAX; they are
     * 32-bit fixed-point magnitudes and rely on the implementation-defined
     * unsigned->int wrap on two's-complement targets - confirm when porting.
     */
    int const C_1 = 4229717092; /* cos(pi*1/18)*2 */
    int const C_2 = 4035949074; /* cos(pi*2/18)*2 */
    int const C_3 = 575416510; /* -cos(pi*3/18)*2 */
    int const C_3A = 3719550786; /* cos(pi*3/18)*2 */
    int const C_4 = 1004831466; /* -cos(pi*4/18)*2 */
    int const C_5 = 1534215534; /* -cos(pi*5/18)*2 */
    int const C_7 = -1468965330; /* -cos(pi*7/18)*2 */
    int const C_8 = -745813244; /* -cos(pi*8/18)*2 */
    /*
     * instructions of the first two loops are reorganized and loops are unrolled,
     * in order to eliminate unnecessary readings and writings in array
     */
    /* prologue: in[k] += in[k-1] running sums, walking from in[17] down to
     * in[1]; results are stored back into in[] (in[] is scratch from here on)
     */
    __asm__ volatile (
        "lw %[t1], 17*4(%[in]) \n\t"
        "lw %[t2], 16*4(%[in]) \n\t"
        "lw %[t3], 15*4(%[in]) \n\t"
        "lw %[t4], 14*4(%[in]) \n\t"
        "addu %[t1], %[t1], %[t2] \n\t"
        "addu %[t2], %[t2], %[t3] \n\t"
        "addu %[t3], %[t3], %[t4] \n\t"
        "lw %[t5], 13*4(%[in]) \n\t"
        "addu %[t1], %[t1], %[t3] \n\t"
        "sw %[t2], 16*4(%[in]) \n\t"
        "lw %[t6], 12*4(%[in]) \n\t"
        "sw %[t1], 17*4(%[in]) \n\t"
        "addu %[t4], %[t4], %[t5] \n\t"
        "addu %[t5], %[t5], %[t6] \n\t"
        "lw %[t7], 11*4(%[in]) \n\t"
        "addu %[t3], %[t3], %[t5] \n\t"
        "sw %[t4], 14*4(%[in]) \n\t"
        "lw %[t8], 10*4(%[in]) \n\t"
        "sw %[t3], 15*4(%[in]) \n\t"
        "addu %[t6], %[t6], %[t7] \n\t"
        "addu %[t7], %[t7], %[t8] \n\t"
        "sw %[t6], 12*4(%[in]) \n\t"
        "addu %[t5], %[t5], %[t7] \n\t"
        "lw %[t1], 9*4(%[in]) \n\t"
        "lw %[t2], 8*4(%[in]) \n\t"
        "sw %[t5], 13*4(%[in]) \n\t"
        "addu %[t8], %[t8], %[t1] \n\t"
        "addu %[t1], %[t1], %[t2] \n\t"
        "sw %[t8], 10*4(%[in]) \n\t"
        "addu %[t7], %[t7], %[t1] \n\t"
        "lw %[t3], 7*4(%[in]) \n\t"
        "lw %[t4], 6*4(%[in]) \n\t"
        "sw %[t7], 11*4(%[in]) \n\t"
        "addu %[t2], %[t2], %[t3] \n\t"
        "addu %[t3], %[t3], %[t4] \n\t"
        "sw %[t2], 8*4(%[in]) \n\t"
        "addu %[t1], %[t1], %[t3] \n\t"
        "lw %[t5], 5*4(%[in]) \n\t"
        "lw %[t6], 4*4(%[in]) \n\t"
        "sw %[t1], 9*4(%[in]) \n\t"
        "addu %[t4], %[t4], %[t5] \n\t"
        "addu %[t5], %[t5], %[t6] \n\t"
        "sw %[t4], 6*4(%[in]) \n\t"
        "addu %[t3], %[t3], %[t5] \n\t"
        "lw %[t7], 3*4(%[in]) \n\t"
        "lw %[t8], 2*4(%[in]) \n\t"
        "sw %[t3], 7*4(%[in]) \n\t"
        "addu %[t6], %[t6], %[t7] \n\t"
        "addu %[t7], %[t7], %[t8] \n\t"
        "sw %[t6], 4*4(%[in]) \n\t"
        "addu %[t5], %[t5], %[t7] \n\t"
        "lw %[t1], 1*4(%[in]) \n\t"
        "lw %[t2], 0*4(%[in]) \n\t"
        "sw %[t5], 5*4(%[in]) \n\t"
        "addu %[t8], %[t8], %[t1] \n\t"
        "addu %[t1], %[t1], %[t2] \n\t"
        "sw %[t8], 2*4(%[in]) \n\t"
        "addu %[t7], %[t7], %[t1] \n\t"
        "sw %[t7], 3*4(%[in]) \n\t"
        "sw %[t1], 1*4(%[in]) \n\t"
        : [in] "+r" (in), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3),
          [t4] "=&r" (t4), [t5] "=&r" (t5), [t6] "=&r" (t6),
          [t7] "=&r" (t7), [t8] "=&r" (t8)
    );
    /* two passes over the interleaved halves: j=0 handles even elements
     * (in[0],in[2],...), j=1 the odd ones (in1 = in + j, offsets 0,4,8,12,16)
     */
    for(j = 0; j < 2; j++) {
        tmp1 = tmp + j;
        in1 = in + j;
        /**
         * Original constants are multiplied by two in advanced
         * for assembly optimization (e.g. C_2 = 2 * C2).
         * That can lead to overflow in operations where they are used.
         *
         * Example of the solution:
         *
         * in original code:
         * t0 = ((int64_t)(in1[2*2] + in1[2*4]) * (int64_t)(2*C2))>>32
         *
         * in assembly:
         * C_2 = 2 * C2;
         * .
         * .
         * "lw %[t7], 4*4(%[in1]) \n\t"
         * "lw %[t8], 8*4(%[in1]) \n\t"
         * "addu %[temp_reg2],%[t7], %[t8] \n\t"
         * "multu %[C_2], %[temp_reg2] \n\t"
         * "mfhi %[temp_reg1] \n\t"
         * "sra %[temp_reg2],%[temp_reg2],31 \n\t"
         * "move %[t0], $0 \n\t"
         * "movn %[t0], %[C_2], %[temp_reg2] \n\t"
         * "sub %[t0], %[temp_reg1],%[t0] \n\t"
         */
        /* NOTE(review): $ac1-$ac3 are written here but only "hi"/"lo"
         * ($ac0) appear in the clobber list - presumably fine because GCC
         * does not allocate the DSP accumulators; confirm for newer GCC.
         */
        __asm__ volatile (
            "lw %[t7], 4*4(%[in1]) \n\t"
            "lw %[t8], 8*4(%[in1]) \n\t"
            "lw %[t6], 16*4(%[in1]) \n\t"
            "lw %[t4], 0*4(%[in1]) \n\t"
            "addu %[temp_reg2], %[t7], %[t8] \n\t"
            "addu %[t2], %[t6], %[t8] \n\t"
            "multu %[C_2], %[temp_reg2] \n\t"
            "lw %[t5], 12*4(%[in1]) \n\t"
            "sub %[t2], %[t2], %[t7] \n\t"
            "sub %[t1], %[t4], %[t5] \n\t"
            "sra %[t3], %[t5], 1 \n\t"
            "sra %[temp_reg1], %[t2], 1 \n\t"
            "addu %[t3], %[t3], %[t4] \n\t"
            "sub %[temp_reg1], %[t1], %[temp_reg1] \n\t"
            "sra %[temp_reg2], %[temp_reg2], 31 \n\t"
            "sw %[temp_reg1], 6*4(%[tmp1]) \n\t"
            "move %[t0], $0 \n\t"
            "movn %[t0], %[C_2], %[temp_reg2] \n\t"
            "mfhi %[temp_reg1] \n\t"
            "addu %[t1], %[t1], %[t2] \n\t"
            "sw %[t1], 16*4(%[tmp1]) \n\t"
            "sub %[temp_reg4], %[t8], %[t6] \n\t"
            "add %[temp_reg2], %[t7], %[t6] \n\t"
            "mult $ac1, %[C_8], %[temp_reg4] \n\t"
            "multu $ac2, %[C_4], %[temp_reg2] \n\t"
            "sub %[t0], %[temp_reg1], %[t0] \n\t"
            "sra %[temp_reg1], %[temp_reg2], 31 \n\t"
            "move %[t2], $0 \n\t"
            "movn %[t2], %[C_4], %[temp_reg1] \n\t"
            "mfhi %[t1], $ac1 \n\t"
            "mfhi %[temp_reg1], $ac2 \n\t"
            "lw %[t6], 10*4(%[in1]) \n\t"
            "lw %[t8], 14*4(%[in1]) \n\t"
            "lw %[t7], 2*4(%[in1]) \n\t"
            "lw %[t4], 6*4(%[in1]) \n\t"
            "sub %[temp_reg3], %[t3], %[t0] \n\t"
            "add %[temp_reg4], %[t3], %[t0] \n\t"
            "sub %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
            "add %[temp_reg4], %[temp_reg4], %[t1] \n\t"
            "sub %[t2], %[temp_reg1], %[t2] \n\t"
            "sw %[temp_reg4], 2*4(%[tmp1]) \n\t"
            "sub %[temp_reg3], %[temp_reg3], %[t2] \n\t"
            "add %[temp_reg1], %[t3], %[t2] \n\t"
            "sw %[temp_reg3], 10*4(%[tmp1]) \n\t"
            "sub %[temp_reg1], %[temp_reg1], %[t1] \n\t"
            "addu %[temp_reg2], %[t6], %[t8] \n\t"
            "sw %[temp_reg1], 14*4(%[tmp1]) \n\t"
            "sub %[temp_reg2], %[temp_reg2], %[t7] \n\t"
            "addu %[temp_reg3], %[t7], %[t6] \n\t"
            "multu $ac3, %[C_3], %[temp_reg2] \n\t"
            "multu %[C_1], %[temp_reg3] \n\t"
            "sra %[temp_reg1], %[temp_reg2], 31 \n\t"
            "move %[t1], $0 \n\t"
            "sra %[temp_reg3], %[temp_reg3], 31 \n\t"
            "movn %[t1], %[C_3], %[temp_reg1] \n\t"
            "mfhi %[temp_reg1], $ac3 \n\t"
            "mfhi %[temp_reg4] \n\t"
            "move %[t2], $0 \n\t"
            "movn %[t2], %[C_1], %[temp_reg3] \n\t"
            "sub %[temp_reg3], %[t6], %[t8] \n\t"
            "sub %[t2], %[temp_reg4], %[t2] \n\t"
            "multu $ac1, %[C_7], %[temp_reg3] \n\t"
            "sub %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
            "sra %[temp_reg4], %[temp_reg3], 31 \n\t"
            "sub %[t1], %[temp_reg1], %[t1] \n\t"
            "move %[t3], $0 \n\t"
            "sw %[t1], 4*4(%[tmp1]) \n\t"
            "movn %[t3], %[C_7], %[temp_reg4] \n\t"
            "multu $ac2, %[C_3A], %[t4] \n\t"
            "add %[temp_reg2], %[t7], %[t8] \n\t"
            "move %[t1], $0 \n\t"
            "mfhi %[temp_reg4], $ac1 \n\t"
            "multu $ac3,%[C_5], %[temp_reg2] \n\t"
            "move %[t0], $0 \n\t"
            "sra %[temp_reg1], %[temp_reg2], 31 \n\t"
            "movn %[t1],%[C_5], %[temp_reg1] \n\t"
            "sub %[temp_reg4], %[temp_reg4], %[temp_reg3] \n\t"
            "mfhi %[temp_reg1], $ac3 \n\t"
            "sra %[temp_reg3], %[t4], 31 \n\t"
            "movn %[t0], %[C_3A], %[temp_reg3] \n\t"
            "mfhi %[temp_reg3], $ac2 \n\t"
            "sub %[t3], %[temp_reg4], %[t3] \n\t"
            "add %[temp_reg4], %[t3], %[t2] \n\t"
            "sub %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
            "sub %[t1], %[temp_reg1], %[t1] \n\t"
            "sub %[t0], %[temp_reg3], %[t0] \n\t"
            "add %[temp_reg1], %[t2], %[t1] \n\t"
            "add %[temp_reg4], %[temp_reg4], %[t0] \n\t"
            "sub %[temp_reg2], %[t3], %[t1] \n\t"
            "sw %[temp_reg4], 0*4(%[tmp1]) \n\t"
            "sub %[temp_reg1], %[temp_reg1], %[t0] \n\t"
            "sub %[temp_reg2], %[temp_reg2], %[t0] \n\t"
            "sw %[temp_reg1], 12*4(%[tmp1]) \n\t"
            "sw %[temp_reg2], 8*4(%[tmp1]) \n\t"
            : [t7] "=&r" (t7), [temp_reg1] "=&r" (temp_reg1),
              [temp_reg2] "=&r" (temp_reg2), [temp_reg4] "=&r" (temp_reg4),
              [temp_reg3] "=&r" (temp_reg3), [t8] "=&r" (t8), [t0] "=&r" (t0),
              [t4] "=&r" (t4), [t5] "=&r" (t5), [t6] "=&r"(t6), [t2] "=&r" (t2),
              [t3] "=&r" (t3), [t1] "=&r" (t1)
            : [C_2] "r" (C_2), [in1] "r" (in1), [tmp1] "r" (tmp1), [C_8] "r" (C_8),
              [C_4] "r" (C_4), [C_3] "r" (C_3), [C_1] "r" (C_1), [C_7] "r" (C_7),
              [C_3A] "r" (C_3A), [C_5] "r" (C_5)
            : "hi", "lo"
        );
    }
    /**
     * loop is unrolled four times
     *
     * values defined in tables(icos36[] and icos36h[]) are not loaded from
     * these tables - they are directly loaded in appropriate registers
     *
     */
    /* NOTE(review): the "li" immediates below (0x807D2B1E, 0x2de5151, ...)
     * are presumably the inlined icos36[]/icos36h[] fixed-point entries -
     * verify against the tables in the generic mpegaudio code before editing.
     */
    __asm__ volatile (
        "lw %[t2], 1*4(%[tmp]) \n\t"
        "lw %[t3], 3*4(%[tmp]) \n\t"
        "lw %[t0], 0*4(%[tmp]) \n\t"
        "lw %[t1], 2*4(%[tmp]) \n\t"
        "addu %[temp_reg1], %[t3], %[t2] \n\t"
        "li %[temp_reg2], 0x807D2B1E \n\t"
        "move %[s1], $0 \n\t"
        "multu %[temp_reg2], %[temp_reg1] \n\t"
        "sra %[temp_reg1], %[temp_reg1], 31 \n\t"
        "movn %[s1], %[temp_reg2], %[temp_reg1] \n\t"
        "sub %[temp_reg3], %[t3], %[t2] \n\t"
        "li %[temp_reg4], 0x2de5151 \n\t"
        "mfhi %[temp_reg2] \n\t"
        "addu %[s0], %[t1], %[t0] \n\t"
        "lw %[temp_reg5], 9*4(%[win]) \n\t"
        "mult $ac1, %[temp_reg4], %[temp_reg3] \n\t"
        "lw %[temp_reg6], 4*9*4(%[buf]) \n\t"
        "sub %[s2], %[t1], %[t0] \n\t"
        "lw %[temp_reg3], 29*4(%[win]) \n\t"
        "subu %[s1], %[temp_reg2], %[s1] \n\t"
        "lw %[temp_reg4], 28*4(%[win]) \n\t"
        "add %[t0], %[s0], %[s1] \n\t"
        "extr.w %[s3], $ac1,23 \n\t"
        "mult $ac2, %[t0], %[temp_reg3] \n\t"
        "sub %[t1], %[s0], %[s1] \n\t"
        "lw %[temp_reg1], 4*8*4(%[buf]) \n\t"
        "mult %[t1], %[temp_reg5] \n\t"
        "lw %[temp_reg2], 8*4(%[win]) \n\t"
        "mfhi %[temp_reg3], $ac2 \n\t"
        "mult $ac3, %[t0], %[temp_reg4] \n\t"
        "add %[t0], %[s2], %[s3] \n\t"
        "mfhi %[temp_reg5] \n\t"
        "mult $ac1, %[t1], %[temp_reg2] \n\t"
        "sub %[t1], %[s2], %[s3] \n\t"
        "sw %[temp_reg3], 4*9*4(%[buf]) \n\t"
        "mfhi %[temp_reg4], $ac3 \n\t"
        "lw %[temp_reg3], 37*4(%[win]) \n\t"
        "mfhi %[temp_reg2], $ac1 \n\t"
        "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
        "lw %[temp_reg6], 17*4(%[win]) \n\t"
        "sw %[temp_reg5], 32*9*4(%[out]) \n\t"
        "sw %[temp_reg4], 4*8*4(%[buf]) \n\t"
        "mult %[t1], %[temp_reg6] \n\t"
        "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
        "lw %[temp_reg2], 0*4(%[win]) \n\t"
        "lw %[temp_reg5], 4*17*4(%[buf]) \n\t"
        "sw %[temp_reg1], 8*32*4(%[out]) \n\t"
        "mfhi %[temp_reg6] \n\t"
        "mult $ac1, %[t1], %[temp_reg2] \n\t"
        "lw %[temp_reg4], 20*4(%[win]) \n\t"
        "lw %[temp_reg1], 0(%[buf]) \n\t"
        "mult $ac2, %[t0], %[temp_reg3] \n\t"
        "mult %[t0], %[temp_reg4] \n\t"
        "mfhi %[temp_reg2], $ac1 \n\t"
        "lw %[t0], 4*4(%[tmp]) \n\t"
        "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
        "mfhi %[temp_reg3], $ac2 \n\t"
        "mfhi %[temp_reg4] \n\t"
        "sw %[temp_reg5], 17*32*4(%[out]) \n\t"
        "lw %[t1], 6*4(%[tmp]) \n\t"
        "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
        "lw %[t2], 5*4(%[tmp]) \n\t"
        "sw %[temp_reg1], 0*32*4(%[out]) \n\t"
        "addu %[s0], %[t1], %[t0] \n\t"
        "sw %[temp_reg3], 4*17*4(%[buf]) \n\t"
        "lw %[t3], 7*4(%[tmp]) \n\t"
        "sub %[s2], %[t1], %[t0] \n\t"
        "sw %[temp_reg4], 0(%[buf]) \n\t"
        "addu %[temp_reg5], %[t3], %[t2] \n\t"
        "li %[temp_reg6], 0x8483EE0C \n\t"
        "move %[s1], $0 \n\t"
        "multu %[temp_reg6], %[temp_reg5] \n\t"
        "sub %[temp_reg1], %[t3], %[t2] \n\t"
        "li %[temp_reg2], 0xf746ea \n\t"
        "sra %[temp_reg5], %[temp_reg5], 31 \n\t"
        "mult $ac1, %[temp_reg2], %[temp_reg1] \n\t"
        "movn %[s1], %[temp_reg6], %[temp_reg5] \n\t"
        "mfhi %[temp_reg5] \n\t"
        "lw %[temp_reg3], 10*4(%[win]) \n\t"
        "lw %[temp_reg4], 4*10*4(%[buf]) \n\t"
        "extr.w %[s3], $ac1, 23 \n\t"
        "lw %[temp_reg1], 4*7*4(%[buf]) \n\t"
        "lw %[temp_reg2], 7*4(%[win]) \n\t"
        "lw %[temp_reg6], 30*4(%[win]) \n\t"
        "subu %[s1], %[temp_reg5], %[s1] \n\t"
        "sub %[t1], %[s0], %[s1] \n\t"
        "add %[t0], %[s0], %[s1] \n\t"
        "mult $ac2, %[t1], %[temp_reg3] \n\t"
        "mult $ac3, %[t1], %[temp_reg2] \n\t"
        "mult %[t0], %[temp_reg6] \n\t"
        "lw %[temp_reg5], 27*4(%[win]) \n\t"
        "mult $ac1, %[t0], %[temp_reg5] \n\t"
        "mfhi %[temp_reg3], $ac2 \n\t"
        "mfhi %[temp_reg2], $ac3 \n\t"
        "mfhi %[temp_reg6] \n\t"
        "add %[t0], %[s2], %[s3] \n\t"
        "sub %[t1], %[s2], %[s3] \n\t"
        "add %[temp_reg3], %[temp_reg3], %[temp_reg4] \n\t"
        "lw %[temp_reg4], 16*4(%[win]) \n\t"
        "mfhi %[temp_reg5], $ac1 \n\t"
        "sw %[temp_reg3], 32*10*4(%[out]) \n\t"
        "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
        "lw %[temp_reg3], 4*16*4(%[buf]) \n\t"
        "sw %[temp_reg6], 4*10*4(%[buf]) \n\t"
        "sw %[temp_reg1], 7*32*4(%[out]) \n\t"
        "mult $ac2, %[t1], %[temp_reg4] \n\t"
        "sw %[temp_reg5], 4*7*4(%[buf]) \n\t"
        "lw %[temp_reg6], 1*4(%[win]) \n\t"
        "lw %[temp_reg5], 4*1*4(%[buf]) \n\t"
        "lw %[temp_reg1], 36*4(%[win]) \n\t"
        "mult $ac3, %[t1], %[temp_reg6] \n\t"
        "lw %[temp_reg2], 21*4(%[win]) \n\t"
        "mfhi %[temp_reg4], $ac2 \n\t"
        "mult %[t0], %[temp_reg1] \n\t"
        "mult $ac1, %[t0],%[temp_reg2] \n\t"
        "lw %[t0], 8*4(%[tmp]) \n\t"
        "mfhi %[temp_reg6], $ac3 \n\t"
        "lw %[t1], 10*4(%[tmp]) \n\t"
        "lw %[t3], 11*4(%[tmp]) \n\t"
        "mfhi %[temp_reg1] \n\t"
        "add %[temp_reg3], %[temp_reg3], %[temp_reg4] \n\t"
        "lw %[t2], 9*4(%[tmp]) \n\t"
        "mfhi %[temp_reg2], $ac1 \n\t"
        "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
        "sw %[temp_reg3], 16*32*4(%[out]) \n\t"
        "sw %[temp_reg5], 1*32*4(%[out]) \n\t"
        "sw %[temp_reg1], 4*16*4(%[buf]) \n\t"
        "addu %[temp_reg3], %[t3], %[t2] \n\t"
        "li %[temp_reg4], 0x8D3B7CD6 \n\t"
        "sw %[temp_reg2], 4*1*4(%[buf]) \n\t"
        "multu %[temp_reg4],%[temp_reg3] \n\t"
        "sra %[temp_reg3], %[temp_reg3], 31 \n\t"
        "move %[s1], $0 \n\t"
        "movn %[s1], %[temp_reg4], %[temp_reg3] \n\t"
        "addu %[s0], %[t1], %[t0] \n\t"
        "mfhi %[temp_reg3] \n\t"
        "sub %[s2], %[t1], %[t0] \n\t"
        "sub %[temp_reg5], %[t3], %[t2] \n\t"
        "li %[temp_reg6], 0x976fd9 \n\t"
        "lw %[temp_reg2], 11*4(%[win]) \n\t"
        "lw %[temp_reg1], 4*11*4(%[buf]) \n\t"
        "mult $ac1, %[temp_reg6], %[temp_reg5] \n\t"
        "subu %[s1], %[temp_reg3], %[s1] \n\t"
        "lw %[temp_reg5], 31*4(%[win]) \n\t"
        "sub %[t1], %[s0], %[s1] \n\t"
        "add %[t0], %[s0], %[s1] \n\t"
        "mult $ac2, %[t1], %[temp_reg2] \n\t"
        "mult %[t0], %[temp_reg5] \n\t"
        "lw %[temp_reg4], 6*4(%[win]) \n\t"
        "extr.w %[s3], $ac1, 23 \n\t"
        "lw %[temp_reg3], 4*6*4(%[buf]) \n\t"
        "mfhi %[temp_reg2], $ac2 \n\t"
        "lw %[temp_reg6], 26*4(%[win]) \n\t"
        "mfhi %[temp_reg5] \n\t"
        "mult $ac3, %[t1], %[temp_reg4] \n\t"
        "mult $ac1, %[t0], %[temp_reg6] \n\t"
        "add %[t0], %[s2], %[s3] \n\t"
        "sub %[t1], %[s2], %[s3] \n\t"
        "add %[temp_reg2], %[temp_reg2], %[temp_reg1] \n\t"
        "mfhi %[temp_reg4], $ac3 \n\t"
        "mfhi %[temp_reg6], $ac1 \n\t"
        "sw %[temp_reg5], 4*11*4(%[buf]) \n\t"
        "sw %[temp_reg2], 32*11*4(%[out]) \n\t"
        "lw %[temp_reg1], 4*15*4(%[buf]) \n\t"
        "add %[temp_reg3], %[temp_reg3], %[temp_reg4] \n\t"
        "lw %[temp_reg2], 15*4(%[win]) \n\t"
        "sw %[temp_reg3], 6*32*4(%[out]) \n\t"
        "sw %[temp_reg6], 4*6*4(%[buf]) \n\t"
        "mult %[t1], %[temp_reg2] \n\t"
        "lw %[temp_reg3], 2*4(%[win]) \n\t"
        "lw %[temp_reg4], 4*2*4(%[buf]) \n\t"
        "lw %[temp_reg5], 35*4(%[win]) \n\t"
        "mult $ac1, %[t1], %[temp_reg3] \n\t"
        "mfhi %[temp_reg2] \n\t"
        "lw %[temp_reg6], 22*4(%[win]) \n\t"
        "mult $ac2, %[t0], %[temp_reg5] \n\t"
        "lw %[t1], 14*4(%[tmp]) \n\t"
        "mult $ac3, %[t0], %[temp_reg6] \n\t"
        "lw %[t0], 12*4(%[tmp]) \n\t"
        "mfhi %[temp_reg3], $ac1 \n\t"
        "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
        "mfhi %[temp_reg5], $ac2 \n\t"
        "sw %[temp_reg1], 15*32*4(%[out]) \n\t"
        "mfhi %[temp_reg6], $ac3 \n\t"
        "lw %[t2], 13*4(%[tmp]) \n\t"
        "lw %[t3], 15*4(%[tmp]) \n\t"
        "add %[temp_reg4], %[temp_reg4], %[temp_reg3] \n\t"
        "sw %[temp_reg5], 4*15*4(%[buf]) \n\t"
        "addu %[temp_reg1], %[t3], %[t2] \n\t"
        "li %[temp_reg2], 0x9C42577C \n\t"
        "move %[s1], $0 \n\t"
        "multu %[temp_reg2], %[temp_reg1] \n\t"
        "sw %[temp_reg4], 2*32*4(%[out]) \n\t"
        "sra %[temp_reg1], %[temp_reg1], 31 \n\t"
        "movn %[s1], %[temp_reg2], %[temp_reg1] \n\t"
        "sub %[temp_reg3], %[t3], %[t2] \n\t"
        "li %[temp_reg4], 0x6f94a2 \n\t"
        "mfhi %[temp_reg1] \n\t"
        "addu %[s0], %[t1], %[t0] \n\t"
        "sw %[temp_reg6], 4*2*4(%[buf]) \n\t"
        "mult $ac1, %[temp_reg4], %[temp_reg3] \n\t"
        "sub %[s2], %[t1], %[t0] \n\t"
        "lw %[temp_reg5], 12*4(%[win]) \n\t"
        "lw %[temp_reg6], 4*12*4(%[buf]) \n\t"
        "subu %[s1], %[temp_reg1], %[s1] \n\t"
        "sub %[t1], %[s0], %[s1] \n\t"
        "lw %[temp_reg3], 32*4(%[win]) \n\t"
        "mult $ac2, %[t1], %[temp_reg5] \n\t"
        "add %[t0], %[s0], %[s1] \n\t"
        "extr.w %[s3], $ac1, 23 \n\t"
        "lw %[temp_reg2], 5*4(%[win]) \n\t"
        "mult %[t0], %[temp_reg3] \n\t"
        "mfhi %[temp_reg5], $ac2 \n\t"
        "lw %[temp_reg4], 25*4(%[win]) \n\t"
        "lw %[temp_reg1], 4*5*4(%[buf]) \n\t"
        "mult $ac3, %[t1], %[temp_reg2] \n\t"
        "mult $ac1, %[t0], %[temp_reg4] \n\t"
        "mfhi %[temp_reg3] \n\t"
        "add %[t0], %[s2], %[s3] \n\t"
        "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
        "mfhi %[temp_reg2], $ac3 \n\t"
        "mfhi %[temp_reg4], $ac1 \n\t"
        "sub %[t1], %[s2], %[s3] \n\t"
        "sw %[temp_reg5], 32*12*4(%[out]) \n\t"
        "sw %[temp_reg3], 4*12*4(%[buf]) \n\t"
        "lw %[temp_reg6], 14*4(%[win]) \n\t"
        "lw %[temp_reg5], 4*14*4(%[buf]) \n\t"
        "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
        "sw %[temp_reg4], 4*5*4(%[buf]) \n\t"
        "sw %[temp_reg1], 5*32*4(%[out]) \n\t"
        "mult %[t1], %[temp_reg6] \n\t"
        "lw %[temp_reg4], 34*4(%[win]) \n\t"
        "lw %[temp_reg2], 3*4(%[win]) \n\t"
        "lw %[temp_reg1], 4*3*4(%[buf]) \n\t"
        "mult $ac2, %[t0], %[temp_reg4] \n\t"
        "mfhi %[temp_reg6] \n\t"
        "mult $ac1, %[t1], %[temp_reg2] \n\t"
        "lw %[temp_reg3], 23*4(%[win]) \n\t"
        "lw %[s0], 16*4(%[tmp]) \n\t"
        "mfhi %[temp_reg4], $ac2 \n\t"
        "lw %[t1], 17*4(%[tmp]) \n\t"
        "mult $ac3, %[t0], %[temp_reg3] \n\t"
        "move %[s1], $0 \n\t"
        "add %[temp_reg5], %[temp_reg5], %[temp_reg6] \n\t"
        "mfhi %[temp_reg2], $ac1 \n\t"
        "sw %[temp_reg5], 14*32*4(%[out]) \n\t"
        "sw %[temp_reg4], 4*14*4(%[buf]) \n\t"
        "mfhi %[temp_reg3], $ac3 \n\t"
        "li %[temp_reg5], 0xB504F334 \n\t"
        "add %[temp_reg1], %[temp_reg1], %[temp_reg2] \n\t"
        "multu %[temp_reg5], %[t1] \n\t"
        "lw %[temp_reg2], 4*13*4(%[buf]) \n\t"
        "sw %[temp_reg1], 3*32*4(%[out]) \n\t"
        "sra %[t1], %[t1], 31 \n\t"
        "mfhi %[temp_reg6] \n\t"
        "movn %[s1], %[temp_reg5], %[t1] \n\t"
        "sw %[temp_reg3], 4*3*4(%[buf]) \n\t"
        "lw %[temp_reg1], 13*4(%[win]) \n\t"
        "lw %[temp_reg4], 4*4*4(%[buf]) \n\t"
        "lw %[temp_reg3], 4*4(%[win]) \n\t"
        "lw %[temp_reg5], 33*4(%[win]) \n\t"
        "subu %[s1], %[temp_reg6], %[s1] \n\t"
        "lw %[temp_reg6], 24*4(%[win]) \n\t"
        "sub %[t1], %[s0], %[s1] \n\t"
        "add %[t0], %[s0], %[s1] \n\t"
        "mult $ac1, %[t1], %[temp_reg1] \n\t"
        "mult $ac2, %[t1], %[temp_reg3] \n\t"
        "mult $ac3, %[t0], %[temp_reg5] \n\t"
        "mult %[t0], %[temp_reg6] \n\t"
        "mfhi %[temp_reg1], $ac1 \n\t"
        "mfhi %[temp_reg3], $ac2 \n\t"
        "mfhi %[temp_reg5], $ac3 \n\t"
        "mfhi %[temp_reg6] \n\t"
        "add %[temp_reg2], %[temp_reg2], %[temp_reg1] \n\t"
        "add %[temp_reg4], %[temp_reg4], %[temp_reg3] \n\t"
        "sw %[temp_reg2], 13*32*4(%[out]) \n\t"
        "sw %[temp_reg4], 4*32*4(%[out]) \n\t"
        "sw %[temp_reg5], 4*13*4(%[buf]) \n\t"
        "sw %[temp_reg6], 4*4*4(%[buf]) \n\t"
        : [t0] "=&r" (t0), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3),
          [s0] "=&r" (s0), [s2] "=&r" (s2), [temp_reg1] "=&r" (temp_reg1),
          [temp_reg2] "=&r" (temp_reg2), [s1] "=&r" (s1), [s3] "=&r" (s3),
          [temp_reg3] "=&r" (temp_reg3), [temp_reg4] "=&r" (temp_reg4),
          [temp_reg5] "=&r" (temp_reg5), [temp_reg6] "=&r" (temp_reg6),
          [out] "+r" (out)
        : [tmp] "r" (tmp), [win] "r" (win), [buf] "r" (buf)
        : "hi", "lo"
    );
}
/**
 * Run the 36-point fixed-point IMDCT over 'count' consecutive blocks,
 * selecting the proper synthesis window for each block and stepping the
 * in/out/buf pointers between calls.
 */
static void ff_imdct36_blocks_mips_fixed(int *out, int *buf, int *in,
               int count, int switch_point, int block_type)
{
    int blk;

    for (blk = 0; blk < count; blk++) {
        /* window selection:
         * in case of a mixed block (switch_point) the first two blocks
         * use the long-window set (index 0); odd blocks use the
         * alternate window bank at offset +4 */
        int widx = (switch_point && blk < 2) ? 0 : block_type;
        if (blk & 1)
            widx += 4;
        imdct36_mips_fixed(out, buf, in, ff_mdct_win_fixed[widx]);
        in  += 18;
        out += 1;
        /* buf advances by 1 within a group of 4, then jumps to the
         * next group (72 ints per group, minus the 3 steps taken) */
        buf += (blk & 3) == 3 ? 72 - 3 : 1;
    }
}
/**
 * Install the MIPS DSPr1 fixed-point routines into the MPEG audio
 * DSP context, overriding the generic C implementations.
 */
void ff_mpadsp_init_mipsdspr1(MPADSPContext *s)
{
    /* fixed-point 36-point IMDCT block driver */
    s->imdct36_blocks_fixed = ff_imdct36_blocks_mips_fixed;
    /* fixed-point synthesis-window application */
    s->apply_window_fixed   = ff_mpadsp_apply_window_mips_fixed;
}

File diff suppressed because it is too large Load Diff