Commit 4460a860 authored by J.R. Mauro, committed by Greg Kroah-Hartman

Staging: Lindent the echo driver

Lindent drivers/staging/echo*

Signed-off-by: J.R. Mauro <jrm8005@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 786ed801
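For context, Lindent is the kernel tree's wrapper script (scripts/Lindent) that runs GNU indent with the kernel coding-style options; a whitespace-only cleanup like the diff below comes from pointing it at the driver's sources. A minimal sketch of the invocation, assuming the usual in-tree script path and a file glob matching "drivers/staging/echo*" (the exact indent flags are whatever scripts/Lindent passes along, not spelled out here):

    # Sketch only: reformat the echo driver with the kernel's Lindent wrapper.
    # The file glob is an assumption based on the commit message.
    ./scripts/Lindent drivers/staging/echo/*.[ch]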
@@ -38,11 +38,12 @@ static __inline__ int top_bit(unsigned int bits)
{
int res;
__asm__ (" xorl %[res],%[res];\n"
__asm__(" xorl %[res],%[res];\n"
" decl %[res];\n"
" bsrl %[bits],%[res]\n"
: [res] "=&r" (res)
: [bits] "rm" (bits));
:[res] "=&r" (res)
:[bits] "rm"(bits)
);
return res;
}
@@ -53,11 +54,12 @@ static __inline__ int bottom_bit(unsigned int bits)
{
int res;
__asm__ (" xorl %[res],%[res];\n"
__asm__(" xorl %[res],%[res];\n"
" decl %[res];\n"
" bsfl %[bits],%[res]\n"
: [res] "=&r" (res)
: [bits] "rm" (bits));
:[res] "=&r" (res)
:[bits] "rm"(bits)
);
return res;
}
#else
@@ -68,28 +70,23 @@ static __inline__ int top_bit(unsigned int bits)
if (bits == 0)
return -1;
i = 0;
if (bits & 0xFFFF0000)
{
if (bits & 0xFFFF0000) {
bits &= 0xFFFF0000;
i += 16;
}
if (bits & 0xFF00FF00)
{
if (bits & 0xFF00FF00) {
bits &= 0xFF00FF00;
i += 8;
}
if (bits & 0xF0F0F0F0)
{
if (bits & 0xF0F0F0F0) {
bits &= 0xF0F0F0F0;
i += 4;
}
if (bits & 0xCCCCCCCC)
{
if (bits & 0xCCCCCCCC) {
bits &= 0xCCCCCCCC;
i += 2;
}
if (bits & 0xAAAAAAAA)
{
if (bits & 0xAAAAAAAA) {
bits &= 0xAAAAAAAA;
i += 1;
}
@@ -103,28 +100,23 @@ static __inline__ int bottom_bit(unsigned int bits)
if (bits == 0)
return -1;
i = 32;
if (bits & 0x0000FFFF)
{
if (bits & 0x0000FFFF) {
bits &= 0x0000FFFF;
i -= 16;
}
if (bits & 0x00FF00FF)
{
if (bits & 0x00FF00FF) {
bits &= 0x00FF00FF;
i -= 8;
}
if (bits & 0x0F0F0F0F)
{
if (bits & 0x0F0F0F0F) {
bits &= 0x0F0F0F0F;
i -= 4;
}
if (bits & 0x33333333)
{
if (bits & 0x33333333) {
bits &= 0x33333333;
i -= 2;
}
if (bits & 0x55555555)
{
if (bits & 0x55555555) {
bits &= 0x55555555;
i -= 1;
}
@@ -139,7 +131,8 @@ static __inline__ uint8_t bit_reverse8(uint8_t x)
{
#if defined(__i386__) || defined(__x86_64__)
/* If multiply is fast */
return ((x*0x0802U & 0x22110U) | (x*0x8020U & 0x88440U))*0x10101U >> 16;
return ((x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) *
0x10101U >> 16;
#else
/* If multiply is slow, but we have a barrel shifter */
x = (x >> 4) | (x << 4);
......
This diff is collapsed.
@@ -124,9 +124,8 @@ a minor burden.
G.168 echo canceller descriptor. This defines the working state for a line
echo canceller.
*/
struct oslec_state
{
int16_t tx,rx;
struct oslec_state {
int16_t tx, rx;
int16_t clean;
int16_t clean_nlp;
......
@@ -72,8 +72,7 @@
16 bit integer FIR descriptor. This defines the working state for a single
instance of an FIR filter using 16 bit integer coefficients.
*/
typedef struct
{
typedef struct {
int taps;
int curr_pos;
const int16_t *coeffs;
@@ -85,8 +84,7 @@ typedef struct
instance of an FIR filter using 32 bit integer coefficients, and filtering
16 bit integer data.
*/
typedef struct
{
typedef struct {
int taps;
int curr_pos;
const int32_t *coeffs;
@@ -97,39 +95,37 @@ typedef struct
Floating point FIR descriptor. This defines the working state for a single
instance of an FIR filter using floating point coefficients and data.
*/
typedef struct
{
typedef struct {
int taps;
int curr_pos;
const float *coeffs;
float *history;
} fir_float_state_t;
static __inline__ const int16_t *fir16_create(fir16_state_t *fir,
const int16_t *coeffs,
int taps)
static __inline__ const int16_t *fir16_create(fir16_state_t * fir,
const int16_t * coeffs, int taps)
{
fir->taps = taps;
fir->curr_pos = taps - 1;
fir->coeffs = coeffs;
#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
fir->history = kcalloc(2*taps, sizeof(int16_t), GFP_KERNEL);
fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL);
#else
fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
#endif
return fir->history;
}
static __inline__ void fir16_flush(fir16_state_t *fir)
static __inline__ void fir16_flush(fir16_state_t * fir)
{
#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
memset(fir->history, 0, 2*fir->taps*sizeof(int16_t));
memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t));
#else
memset(fir->history, 0, fir->taps*sizeof(int16_t));
memset(fir->history, 0, fir->taps * sizeof(int16_t));
#endif
}
static __inline__ void fir16_free(fir16_state_t *fir)
static __inline__ void fir16_free(fir16_state_t * fir)
{
kfree(fir->history);
}
@@ -141,9 +137,7 @@ static inline int32_t dot_asm(short *x, short *y, int len)
len--;
__asm__
(
"I0 = %1;\n\t"
__asm__("I0 = %1;\n\t"
"I1 = %2;\n\t"
"A0 = 0;\n\t"
"R0.L = W[I0++] || R1.L = W[I1++];\n\t"
@@ -154,16 +148,16 @@ static inline int32_t dot_asm(short *x, short *y, int len)
"A0 += R0.L*R1.L (IS);\n\t"
"R0 = A0;\n\t"
"%0 = R0;\n\t"
: "=&d" (dot)
: "a" (x), "a" (y), "a" (len)
: "I0", "I1", "A1", "A0", "R0", "R1"
:"=&d"(dot)
:"a"(x), "a"(y), "a"(len)
:"I0", "I1", "A1", "A0", "R0", "R1"
);
return dot;
}
#endif
static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
static __inline__ int16_t fir16(fir16_state_t * fir, int16_t sample)
{
int32_t y;
#if defined(USE_MMX)
@@ -175,12 +169,11 @@ static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
fir->history[fir->curr_pos + fir->taps] = sample;
mmx_coeffs = (mmx_t *) fir->coeffs;
mmx_hist = (mmx_t *) &fir->history[fir->curr_pos];
mmx_hist = (mmx_t *) & fir->history[fir->curr_pos];
i = fir->taps;
pxor_r2r(mm4, mm4);
/* 8 samples per iteration, so the filter must be a multiple of 8 long. */
while (i > 0)
{
while (i > 0) {
movq_m2r(mmx_coeffs[0], mm0);
movq_m2r(mmx_coeffs[1], mm2);
movq_m2r(mmx_hist[0], mm1);
@@ -207,12 +200,11 @@ static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
fir->history[fir->curr_pos + fir->taps] = sample;
xmm_coeffs = (xmm_t *) fir->coeffs;
xmm_hist = (xmm_t *) &fir->history[fir->curr_pos];
xmm_hist = (xmm_t *) & fir->history[fir->curr_pos];
i = fir->taps;
pxor_r2r(xmm4, xmm4);
/* 16 samples per iteration, so the filter must be a multiple of 16 long. */
while (i > 0)
{
while (i > 0) {
movdqu_m2r(xmm_coeffs[0], xmm0);
movdqu_m2r(xmm_coeffs[1], xmm2);
movdqu_m2r(xmm_hist[0], xmm1);
@@ -235,7 +227,8 @@ static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
#elif defined(__bfin__)
fir->history[fir->curr_pos] = sample;
fir->history[fir->curr_pos + fir->taps] = sample;
y = dot_asm((int16_t*)fir->coeffs, &fir->history[fir->curr_pos], fir->taps);
y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos],
fir->taps);
#else
int i;
int offset1;
@@ -247,9 +240,9 @@ static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
offset1 = fir->taps - offset2;
y = 0;
for (i = fir->taps - 1; i >= offset1; i--)
y += fir->coeffs[i]*fir->history[i - offset1];
for ( ; i >= 0; i--)
y += fir->coeffs[i]*fir->history[i + offset2];
y += fir->coeffs[i] * fir->history[i - offset1];
for (; i >= 0; i--)
y += fir->coeffs[i] * fir->history[i + offset2];
#endif
if (fir->curr_pos <= 0)
fir->curr_pos = fir->taps;
@@ -257,9 +250,8 @@ static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
return (int16_t) (y >> 15);
}
static __inline__ const int16_t *fir32_create(fir32_state_t *fir,
const int32_t *coeffs,
int taps)
static __inline__ const int16_t *fir32_create(fir32_state_t * fir,
const int32_t * coeffs, int taps)
{
fir->taps = taps;
fir->curr_pos = taps - 1;
@@ -268,17 +260,17 @@ static __inline__ const int16_t *fir32_create(fir32_state_t *fir,
return fir->history;
}
static __inline__ void fir32_flush(fir32_state_t *fir)
static __inline__ void fir32_flush(fir32_state_t * fir)
{
memset(fir->history, 0, fir->taps*sizeof(int16_t));
memset(fir->history, 0, fir->taps * sizeof(int16_t));
}
static __inline__ void fir32_free(fir32_state_t *fir)
static __inline__ void fir32_free(fir32_state_t * fir)
{
kfree(fir->history);
}
static __inline__ int16_t fir32(fir32_state_t *fir, int16_t sample)
static __inline__ int16_t fir32(fir32_state_t * fir, int16_t sample)
{
int i;
int32_t y;
@@ -290,9 +282,9 @@ static __inline__ int16_t fir32(fir32_state_t *fir, int16_t sample)
offset1 = fir->taps - offset2;
y = 0;
for (i = fir->taps - 1; i >= offset1; i--)
y += fir->coeffs[i]*fir->history[i - offset1];
for ( ; i >= 0; i--)
y += fir->coeffs[i]*fir->history[i + offset2];
y += fir->coeffs[i] * fir->history[i - offset1];
for (; i >= 0; i--)
y += fir->coeffs[i] * fir->history[i + offset2];
if (fir->curr_pos <= 0)
fir->curr_pos = fir->taps;
fir->curr_pos--;
......
@@ -44,7 +44,6 @@ typedef union {
char b[16];
} xmm_t;
#define mmx_i2r(op,imm,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
@@ -63,7 +62,6 @@ typedef union {
#define mmx_r2r(op,regs,regd) \
__asm__ __volatile__ (#op " %" #regs ", %" #regd)
#define emms() __asm__ __volatile__ ("emms")
#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
@@ -192,16 +190,13 @@ typedef union {
#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
/* 3DNOW extensions */
#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
/* AMD MMX extensions - also available in intel SSE */
#define mmx_m2ri(op,mem,reg,imm) \
__asm__ __volatile__ (#op " %1, %0, %%" #reg \
: /* nothing */ \
@@ -216,7 +211,6 @@ typedef union {
: /* nothing */ \
: "m" (mem))
#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
@@ -284,5 +278,4 @@ typedef union {
#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd)
#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd)
#endif /* AVCODEC_I386MMX_H */