author | Mike Melanson <mike@multimedia.cx> | 2005-10-29 23:57:06 +0000 |
---|---|---|
committer | Mike Melanson <mike@multimedia.cx> | 2005-10-29 23:57:06 +0000 |
commit | 03ac29c63fd3d5019c67b3662669b1c443896f0b (patch) | |
tree | 00db769b2943fce16b9967591652d2c0724be168 /src/libfaad/sbr_hfgen.c | |
parent | c7976c4d0d8d02fa18bc9fd82bafe99e333e2a53 (diff) | |
download | xine-lib-03ac29c63fd3d5019c67b3662669b1c443896f0b.tar.gz xine-lib-03ac29c63fd3d5019c67b3662669b1c443896f0b.tar.bz2 |
update libfaad2 to CVS snapshot 2004-09-15
CVS patchset: 7777
CVS date: 2005/10/29 23:57:06
Diffstat (limited to 'src/libfaad/sbr_hfgen.c')
-rw-r--r-- | src/libfaad/sbr_hfgen.c | 436 |
1 file changed, 304 insertions, 132 deletions
diff --git a/src/libfaad/sbr_hfgen.c b/src/libfaad/sbr_hfgen.c
index 78bab9aa5..688b9a0ea 100644
--- a/src/libfaad/sbr_hfgen.c
+++ b/src/libfaad/sbr_hfgen.c
@@ -22,7 +22,7 @@
 ** Commercial non-GPL licensing of this software is possible.
 ** For more info contact Ahead Software through Mpeg4AAClicense@nero.com.
 **
-** $Id: sbr_hfgen.c,v 1.4 2004/12/03 01:15:30 tmattern Exp $
+** $Id: sbr_hfgen.c,v 1.5 2005/10/29 23:57:07 tmmm Exp $
 **/

 /* High Frequency generation */
@@ -38,20 +38,19 @@

 /* static function declarations */
-static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][32],
-                                 complex_t *alpha_0, complex_t *alpha_1
-#ifdef SBR_LOW_POWER
-                                 , real_t *rxx
-#endif
-                                 );
 #ifdef SBR_LOW_POWER
+static void calc_prediction_coef_lp(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
+                                    complex_t *alpha_0, complex_t *alpha_1, real_t *rxx);
 static void calc_aliasing_degree(sbr_info *sbr, real_t *rxx, real_t *deg);
+#else
+static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
+                                 complex_t *alpha_0, complex_t *alpha_1, uint8_t k);
 #endif
 static void calc_chirp_factors(sbr_info *sbr, uint8_t ch);
 static void patch_construction(sbr_info *sbr);

-void hf_generation(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][32],
+void hf_generation(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
                    qmf_t Xhigh[MAX_NTSRHFG][64]
 #ifdef SBR_LOW_POWER
                    ,real_t *deg
@@ -68,26 +67,18 @@ void hf_generation(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][32],
     uint8_t first = sbr->t_E[ch][0];
     uint8_t last = sbr->t_E[ch][sbr->L_E[ch]];

-//    printf("%d %d\n", first, last);
-
     calc_chirp_factors(sbr, ch);

-    for (i = first; i < last; i++)
-    {
-        memset(Xhigh[i + offset], 0, 64 * sizeof(qmf_t));
-    }
+#ifdef SBR_LOW_POWER
+    memset(deg, 0, 64*sizeof(real_t));
+#endif

     if ((ch == 0) && (sbr->Reset))
         patch_construction(sbr);

     /* calculate the prediction coefficients */
-    calc_prediction_coef(sbr, Xlow, alpha_0, alpha_1
-#ifdef SBR_LOW_POWER
-        , rxx
-#endif
-        );
-
 #ifdef SBR_LOW_POWER
+    calc_prediction_coef_lp(sbr, Xlow, alpha_0, alpha_1, rxx);
     calc_aliasing_degree(sbr, rxx, deg);
 #endif
@@ -96,7 +87,7 @@ void hf_generation(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][32],
     {
         for (x = 0; x < sbr->patchNoSubbands[i]; x++)
         {
-            complex_t a0, a1;
+            real_t a0_r, a0_i, a1_r, a1_i;
             real_t bw, bw2;
             uint8_t q, p, k, g;

@@ -124,35 +115,54 @@
             /* with or without filtering */
             if (bw2 > 0)
             {
-                RE(a0) = MUL_C(RE(alpha_0[p]), bw);
-                RE(a1) = MUL_C(RE(alpha_1[p]), bw2);
+                real_t temp1_r, temp2_r, temp3_r;
 #ifndef SBR_LOW_POWER
-                IM(a0) = MUL_C(IM(alpha_0[p]), bw);
-                IM(a1) = MUL_C(IM(alpha_1[p]), bw2);
+                real_t temp1_i, temp2_i, temp3_i;
+                calc_prediction_coef(sbr, Xlow, alpha_0, alpha_1, p);
 #endif
+                a0_r = MUL_C(RE(alpha_0[p]), bw);
+                a1_r = MUL_C(RE(alpha_1[p]), bw2);
+#ifndef SBR_LOW_POWER
+                a0_i = MUL_C(IM(alpha_0[p]), bw);
+                a1_i = MUL_C(IM(alpha_1[p]), bw2);
+#endif
+
+                temp2_r = QMF_RE(Xlow[first - 2 + offset][p]);
+                temp3_r = QMF_RE(Xlow[first - 1 + offset][p]);
+#ifndef SBR_LOW_POWER
+                temp2_i = QMF_IM(Xlow[first - 2 + offset][p]);
+                temp3_i = QMF_IM(Xlow[first - 1 + offset][p]);
+#endif

                 for (l = first; l < last; l++)
                 {
-                    QMF_RE(Xhigh[l + offset][k]) = QMF_RE(Xlow[l + offset][p]);
+                    temp1_r = temp2_r;
+                    temp2_r = temp3_r;
+                    temp3_r = QMF_RE(Xlow[l + offset][p]);
 #ifndef SBR_LOW_POWER
-                    QMF_IM(Xhigh[l + offset][k]) = QMF_IM(Xlow[l + offset][p]);
+                    temp1_i = temp2_i;
+                    temp2_i = temp3_i;
+                    temp3_i = QMF_IM(Xlow[l + offset][p]);
 #endif

 #ifdef SBR_LOW_POWER
-                    QMF_RE(Xhigh[l + offset][k]) += (
-                        MUL_R(RE(a0), QMF_RE(Xlow[l - 1 + offset][p])) +
-                        MUL_R(RE(a1), QMF_RE(Xlow[l - 2 + offset][p])));
+                    QMF_RE(Xhigh[l + offset][k]) =
+                        temp3_r
+                        +(MUL_R(a0_r, temp2_r) +
+                          MUL_R(a1_r, temp1_r));
 #else
-                    QMF_RE(Xhigh[l + offset][k]) += (
-                        RE(a0) * QMF_RE(Xlow[l - 1 + offset][p]) -
-                        IM(a0) * QMF_IM(Xlow[l - 1 + offset][p]) +
-                        RE(a1) * QMF_RE(Xlow[l - 2 + offset][p]) -
-                        IM(a1) * QMF_IM(Xlow[l - 2 + offset][p]));
-                    QMF_IM(Xhigh[l + offset][k]) += (
-                        IM(a0) * QMF_RE(Xlow[l - 1 + offset][p]) +
-                        RE(a0) * QMF_IM(Xlow[l - 1 + offset][p]) +
-                        IM(a1) * QMF_RE(Xlow[l - 2 + offset][p]) +
-                        RE(a1) * QMF_IM(Xlow[l - 2 + offset][p]));
+                    QMF_RE(Xhigh[l + offset][k]) =
+                        temp3_r
+                        +(MUL_R(a0_r, temp2_r) -
+                          MUL_R(a0_i, temp2_i) +
+                          MUL_R(a1_r, temp1_r) -
+                          MUL_R(a1_i, temp1_i));
+                    QMF_IM(Xhigh[l + offset][k]) =
+                        temp3_i
+                        +(MUL_R(a0_i, temp2_r) +
+                          MUL_R(a0_r, temp2_i) +
+                          MUL_R(a1_i, temp1_r) +
+                          MUL_R(a1_r, temp1_i));
 #endif
                 }
             } else {
@@ -183,19 +193,57 @@ typedef struct
     real_t det;
 } acorr_coef;

-#define SBR_ABS(A) ((A) < 0) ? -(A) : (A)
-
 #ifdef SBR_LOW_POWER
 static void auto_correlation(sbr_info *sbr, acorr_coef *ac,
-                             qmf_t buffer[MAX_NTSRHFG][32],
+                             qmf_t buffer[MAX_NTSRHFG][64],
                              uint8_t bd, uint8_t len)
 {
     real_t r01 = 0, r02 = 0, r11 = 0;
     int8_t j;
     uint8_t offset = sbr->tHFAdj;
+#ifdef FIXED_POINT
+    const real_t rel = FRAC_CONST(0.999999); // 1 / (1 + 1e-6f);
+    uint32_t maxi = 0;
+    uint32_t pow2, exp;
+#else
     const real_t rel = 1 / (1 + 1e-6f);
+#endif

+#ifdef FIXED_POINT
+    mask = 0;
+
+    for (j = (offset-2); j < (len + offset); j++)
+    {
+        real_t x;
+        x = QMF_RE(buffer[j][bd])>>REAL_BITS;
+        mask |= x ^ (x >> 31);
+    }
+
+    exp = wl_min_lzc(mask);
+
+    /* improves accuracy */
+    if (exp > 0)
+        exp -= 1;
+
+    for (j = offset; j < len + offset; j++)
+    {
+        real_t buf_j = ((QMF_RE(buffer[j][bd])+(1<<(exp-1)))>>exp);
+        real_t buf_j_1 = ((QMF_RE(buffer[j-1][bd])+(1<<(exp-1)))>>exp);
+        real_t buf_j_2 = ((QMF_RE(buffer[j-2][bd])+(1<<(exp-1)))>>exp);
+
+        /* normalisation with rounding */
+        r01 += MUL_R(buf_j, buf_j_1);
+        r02 += MUL_R(buf_j, buf_j_2);
+        r11 += MUL_R(buf_j_1, buf_j_1);
+    }
+    RE(ac->r12) = r01 -
+        MUL_R(((QMF_RE(buffer[len+offset-1][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[len+offset-2][bd])+(1<<(exp-1)))>>exp)) +
+        MUL_R(((QMF_RE(buffer[offset-1][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[offset-2][bd])+(1<<(exp-1)))>>exp));
+    RE(ac->r22) = r11 -
+        MUL_R(((QMF_RE(buffer[len+offset-2][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[len+offset-2][bd])+(1<<(exp-1)))>>exp)) +
+        MUL_R(((QMF_RE(buffer[offset-2][bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[offset-2][bd])+(1<<(exp-1)))>>exp));
+#else
     for (j = offset; j < len + offset; j++)
     {
         r01 += QMF_RE(buffer[j][bd]) * QMF_RE(buffer[j-1][bd]);
@@ -208,63 +256,213 @@ static void auto_correlation(sbr_info *sbr, acorr_coef *ac,
     RE(ac->r22) = r11 -
         QMF_RE(buffer[len+offset-2][bd]) * QMF_RE(buffer[len+offset-2][bd]) +
         QMF_RE(buffer[offset-2][bd]) * QMF_RE(buffer[offset-2][bd]);
+#endif

     RE(ac->r01) = r01;
     RE(ac->r02) = r02;
     RE(ac->r11) = r11;

-    ac->det = MUL_R(RE(ac->r11), RE(ac->r22)) - MUL_C(MUL_R(RE(ac->r12), RE(ac->r12)), rel);
+    ac->det = MUL_R(RE(ac->r11), RE(ac->r22)) - MUL_F(MUL_R(RE(ac->r12), RE(ac->r12)), rel);
 }
 #else
-static void auto_correlation(sbr_info *sbr, acorr_coef *ac, qmf_t buffer[MAX_NTSRHFG][32],
+static void auto_correlation(sbr_info *sbr, acorr_coef *ac, qmf_t buffer[MAX_NTSRHFG][64],
                              uint8_t bd, uint8_t len)
 {
     real_t r01r = 0, r01i = 0, r02r = 0, r02i = 0, r11r = 0;
+    real_t temp1_r, temp1_i, temp2_r, temp2_i, temp3_r, temp3_i,
+           temp4_r, temp4_i, temp5_r, temp5_i;
+#ifdef FIXED_POINT
+    const real_t rel = FRAC_CONST(0.999999); // 1 / (1 + 1e-6f);
+    uint32_t mask, exp;
+    real_t pow2_to_exp;
+#else
     const real_t rel = 1 / (1 + 1e-6f);
+#endif
     int8_t j;
     uint8_t offset = sbr->tHFAdj;

+#ifdef FIXED_POINT
+    mask = 0;
+
+    for (j = (offset-2); j < (len + offset); j++)
+    {
+        real_t x;
+        x = QMF_RE(buffer[j][bd])>>REAL_BITS;
+        mask |= x ^ (x >> 31);
+        x = QMF_IM(buffer[j][bd])>>REAL_BITS;
+        mask |= x ^ (x >> 31);
+    }
+
+    exp = wl_min_lzc(mask);
+
+    /* improves accuracy */
+    if (exp > 0)
+        exp -= 1;
+
+    pow2_to_exp = 1<<(exp-1);
+
+    temp2_r = (QMF_RE(buffer[offset-2][bd]) + pow2_to_exp) >> exp;
+    temp2_i = (QMF_IM(buffer[offset-2][bd]) + pow2_to_exp) >> exp;
+    temp3_r = (QMF_RE(buffer[offset-1][bd]) + pow2_to_exp) >> exp;
+    temp3_i = (QMF_IM(buffer[offset-1][bd]) + pow2_to_exp) >> exp;
+    // Save these because they are needed after loop
+    temp4_r = temp2_r;
+    temp4_i = temp2_i;
+    temp5_r = temp3_r;
+    temp5_i = temp3_i;
+
+    for (j = offset; j < len + offset; j++)
+    {
+        temp1_r = temp2_r; // temp1_r = (QMF_RE(buffer[offset-2][bd] + (1<<(exp-1))) >> exp;
+        temp1_i = temp2_i; // temp1_i = (QMF_IM(buffer[offset-2][bd] + (1<<(exp-1))) >> exp;
+        temp2_r = temp3_r; // temp2_r = (QMF_RE(buffer[offset-1][bd] + (1<<(exp-1))) >> exp;
+        temp2_i = temp3_i; // temp2_i = (QMF_IM(buffer[offset-1][bd] + (1<<(exp-1))) >> exp;
+        temp3_r = (QMF_RE(buffer[j][bd]) + pow2_to_exp) >> exp;
+        temp3_i = (QMF_IM(buffer[j][bd]) + pow2_to_exp) >> exp;
+        r01r += MUL_R(temp3_r, temp2_r) + MUL_R(temp3_i, temp2_i);
+        r01i += MUL_R(temp3_i, temp2_r) - MUL_R(temp3_r, temp2_i);
+        r02r += MUL_R(temp3_r, temp1_r) + MUL_R(temp3_i, temp1_i);
+        r02i += MUL_R(temp3_i, temp1_r) - MUL_R(temp3_r, temp1_i);
+        r11r += MUL_R(temp2_r, temp2_r) + MUL_R(temp2_i, temp2_i);
+    }
+
+    // These are actual values in temporary variable at this point
+    // temp1_r = (QMF_RE(buffer[len+offset-1-2][bd] + (1<<(exp-1))) >> exp;
+    // temp1_i = (QMF_IM(buffer[len+offset-1-2][bd] + (1<<(exp-1))) >> exp;
+    // temp2_r = (QMF_RE(buffer[len+offset-1-1][bd] + (1<<(exp-1))) >> exp;
+    // temp2_i = (QMF_IM(buffer[len+offset-1-1][bd] + (1<<(exp-1))) >> exp;
+    // temp3_r = (QMF_RE(buffer[len+offset-1][bd]) + (1<<(exp-1))) >> exp;
+    // temp3_i = (QMF_IM(buffer[len+offset-1][bd]) + (1<<(exp-1))) >> exp;
+    // temp4_r = (QMF_RE(buffer[offset-2][bd]) + (1<<(exp-1))) >> exp;
+    // temp4_i = (QMF_IM(buffer[offset-2][bd]) + (1<<(exp-1))) >> exp;
+    // temp5_r = (QMF_RE(buffer[offset-1][bd]) + (1<<(exp-1))) >> exp;
+    // temp5_i = (QMF_IM(buffer[offset-1][bd]) + (1<<(exp-1))) >> exp;
+
+    RE(ac->r12) = r01r -
+        (MUL_R(temp3_r, temp2_r) + MUL_R(temp3_i, temp2_i)) +
+        (MUL_R(temp5_r, temp4_r) + MUL_R(temp5_i, temp4_i));
+    IM(ac->r12) = r01i -
+        (MUL_R(temp3_i, temp2_r) - MUL_R(temp3_r, temp2_i)) +
+        (MUL_R(temp5_i, temp4_r) - MUL_R(temp5_r, temp4_i));
+    RE(ac->r22) = r11r -
+        (MUL_R(temp2_r, temp2_r) + MUL_R(temp2_i, temp2_i)) +
+        (MUL_R(temp4_r, temp4_r) + MUL_R(temp4_i, temp4_i));
+
+#else
+
+    temp2_r = QMF_RE(buffer[offset-2][bd]);
+    temp2_i = QMF_IM(buffer[offset-2][bd]);
+    temp3_r = QMF_RE(buffer[offset-1][bd]);
+    temp3_i = QMF_IM(buffer[offset-1][bd]);
+    // Save these because they are needed after loop
+    temp4_r = temp2_r;
+    temp4_i = temp2_i;
+    temp5_r = temp3_r;
+    temp5_i = temp3_i;

     for (j = offset; j < len + offset; j++)
     {
-        r01r += QMF_RE(buffer[j][bd]) * QMF_RE(buffer[j-1][bd]) +
-            QMF_IM(buffer[j][bd]) * QMF_IM(buffer[j-1][bd]);
-        r01i += QMF_IM(buffer[j][bd]) * QMF_RE(buffer[j-1][bd]) -
-            QMF_RE(buffer[j][bd]) * QMF_IM(buffer[j-1][bd]);
-        r02r += QMF_RE(buffer[j][bd]) * QMF_RE(buffer[j-2][bd]) +
-            QMF_IM(buffer[j][bd]) * QMF_IM(buffer[j-2][bd]);
-        r02i += QMF_IM(buffer[j][bd]) * QMF_RE(buffer[j-2][bd]) -
-            QMF_RE(buffer[j][bd]) * QMF_IM(buffer[j-2][bd]);
-        r11r += QMF_RE(buffer[j-1][bd]) * QMF_RE(buffer[j-1][bd]) +
-            QMF_IM(buffer[j-1][bd]) * QMF_IM(buffer[j-1][bd]);
+        temp1_r = temp2_r; // temp1_r = QMF_RE(buffer[j-2][bd];
+        temp1_i = temp2_i; // temp1_i = QMF_IM(buffer[j-2][bd];
+        temp2_r = temp3_r; // temp2_r = QMF_RE(buffer[j-1][bd];
+        temp2_i = temp3_i; // temp2_i = QMF_IM(buffer[j-1][bd];
+        temp3_r = QMF_RE(buffer[j][bd]);
+        temp3_i = QMF_IM(buffer[j][bd]);
+        r01r += temp3_r * temp2_r + temp3_i * temp2_i;
+        r01i += temp3_i * temp2_r - temp3_r * temp2_i;
+        r02r += temp3_r * temp1_r + temp3_i * temp1_i;
+        r02i += temp3_i * temp1_r - temp3_r * temp1_i;
+        r11r += temp2_r * temp2_r + temp2_i * temp2_i;
     }

+    // These are actual values in temporary variable at this point
+    // temp1_r = QMF_RE(buffer[len+offset-1-2][bd];
+    // temp1_i = QMF_IM(buffer[len+offset-1-2][bd];
+    // temp2_r = QMF_RE(buffer[len+offset-1-1][bd];
+    // temp2_i = QMF_IM(buffer[len+offset-1-1][bd];
+    // temp3_r = QMF_RE(buffer[len+offset-1][bd]);
+    // temp3_i = QMF_IM(buffer[len+offset-1][bd]);
+    // temp4_r = QMF_RE(buffer[offset-2][bd]);
+    // temp4_i = QMF_IM(buffer[offset-2][bd]);
+    // temp5_r = QMF_RE(buffer[offset-1][bd]);
+    // temp5_i = QMF_IM(buffer[offset-1][bd]);
+
+    RE(ac->r12) = r01r -
+        (temp3_r * temp2_r + temp3_i * temp2_i) +
+        (temp5_r * temp4_r + temp5_i * temp4_i);
+    IM(ac->r12) = r01i -
+        (temp3_i * temp2_r - temp3_r * temp2_i) +
+        (temp5_i * temp4_r - temp5_r * temp4_i);
+    RE(ac->r22) = r11r -
+        (temp2_r * temp2_r + temp2_i * temp2_i) +
+        (temp4_r * temp4_r + temp4_i * temp4_i);
+
+#endif
+
     RE(ac->r01) = r01r;
     IM(ac->r01) = r01i;
     RE(ac->r02) = r02r;
     IM(ac->r02) = r02i;
     RE(ac->r11) = r11r;

-    RE(ac->r12) = r01r -
-        (QMF_RE(buffer[len+offset-1][bd]) * QMF_RE(buffer[len+offset-2][bd]) + QMF_IM(buffer[len+offset-1][bd]) * QMF_IM(buffer[len+offset-2][bd])) +
-        (QMF_RE(buffer[offset-1][bd]) * QMF_RE(buffer[offset-2][bd]) + QMF_IM(buffer[offset-1][bd]) * QMF_IM(buffer[offset-2][bd]));
-    IM(ac->r12) = r01i -
-        (QMF_IM(buffer[len+offset-1][bd]) * QMF_RE(buffer[len+offset-2][bd]) - QMF_RE(buffer[len+offset-1][bd]) * QMF_IM(buffer[len+offset-2][bd])) +
-        (QMF_IM(buffer[offset-1][bd]) * QMF_RE(buffer[offset-2][bd]) - QMF_RE(buffer[offset-1][bd]) * QMF_IM(buffer[offset-2][bd]));
-    RE(ac->r22) = r11r -
-        (QMF_RE(buffer[len+offset-2][bd]) * QMF_RE(buffer[len+offset-2][bd]) + QMF_IM(buffer[len+offset-2][bd]) * QMF_IM(buffer[len+offset-2][bd])) +
-        (QMF_RE(buffer[offset-2][bd]) * QMF_RE(buffer[offset-2][bd]) + QMF_IM(buffer[offset-2][bd]) * QMF_IM(buffer[offset-2][bd]));
-
-    ac->det = RE(ac->r11) * RE(ac->r22) - rel * (RE(ac->r12) * RE(ac->r12) + IM(ac->r12) * IM(ac->r12));
+    ac->det = MUL_R(RE(ac->r11), RE(ac->r22)) - MUL_F(rel, (MUL_R(RE(ac->r12), RE(ac->r12)) + MUL_R(IM(ac->r12), IM(ac->r12))));
 }
 #endif

 /* calculate linear prediction coefficients using the covariance method */
-static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][32],
-                                 complex_t *alpha_0, complex_t *alpha_1
-#ifdef SBR_LOW_POWER
-                                 , real_t *rxx
+#ifndef SBR_LOW_POWER
+static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
+                                 complex_t *alpha_0, complex_t *alpha_1, uint8_t k)
+{
+    real_t tmp;
+    acorr_coef ac;
+
+    auto_correlation(sbr, &ac, Xlow, k, sbr->numTimeSlotsRate + 6);
+
+    if (ac.det == 0)
+    {
+        RE(alpha_1[k]) = 0;
+        IM(alpha_1[k]) = 0;
+    } else {
+#ifdef FIXED_POINT
+        tmp = (MUL_R(RE(ac.r01), RE(ac.r12)) - MUL_R(IM(ac.r01), IM(ac.r12)) - MUL_R(RE(ac.r02), RE(ac.r11)));
+        RE(alpha_1[k]) = DIV_R(tmp, ac.det);
+        tmp = (MUL_R(IM(ac.r01), RE(ac.r12)) + MUL_R(RE(ac.r01), IM(ac.r12)) - MUL_R(IM(ac.r02), RE(ac.r11)));
+        IM(alpha_1[k]) = DIV_R(tmp, ac.det);
+#else
+        tmp = REAL_CONST(1.0) / ac.det;
+        RE(alpha_1[k]) = (MUL_R(RE(ac.r01), RE(ac.r12)) - MUL_R(IM(ac.r01), IM(ac.r12)) - MUL_R(RE(ac.r02), RE(ac.r11))) * tmp;
+        IM(alpha_1[k]) = (MUL_R(IM(ac.r01), RE(ac.r12)) + MUL_R(RE(ac.r01), IM(ac.r12)) - MUL_R(IM(ac.r02), RE(ac.r11))) * tmp;
 #endif
-                                 )
+    }
+
+    if (RE(ac.r11) == 0)
+    {
+        RE(alpha_0[k]) = 0;
+        IM(alpha_0[k]) = 0;
+    } else {
+#ifdef FIXED_POINT
+        tmp = -(RE(ac.r01) + MUL_R(RE(alpha_1[k]), RE(ac.r12)) + MUL_R(IM(alpha_1[k]), IM(ac.r12)));
+        RE(alpha_0[k]) = DIV_R(tmp, RE(ac.r11));
+        tmp = -(IM(ac.r01) + MUL_R(IM(alpha_1[k]), RE(ac.r12)) - MUL_R(RE(alpha_1[k]), IM(ac.r12)));
+        IM(alpha_0[k]) = DIV_R(tmp, RE(ac.r11));
+#else
+        tmp = 1.0f / RE(ac.r11);
+        RE(alpha_0[k]) = -(RE(ac.r01) + MUL_R(RE(alpha_1[k]), RE(ac.r12)) + MUL_R(IM(alpha_1[k]), IM(ac.r12))) * tmp;
+        IM(alpha_0[k]) = -(IM(ac.r01) + MUL_R(IM(alpha_1[k]), RE(ac.r12)) - MUL_R(RE(alpha_1[k]), IM(ac.r12))) * tmp;
+#endif
+    }
+
+    if ((MUL_R(RE(alpha_0[k]),RE(alpha_0[k])) + MUL_R(IM(alpha_0[k]),IM(alpha_0[k])) >= REAL_CONST(16)) ||
+        (MUL_R(RE(alpha_1[k]),RE(alpha_1[k])) + MUL_R(IM(alpha_1[k]),IM(alpha_1[k])) >= REAL_CONST(16)))
+    {
+        RE(alpha_0[k]) = 0;
+        IM(alpha_0[k]) = 0;
+        RE(alpha_1[k]) = 0;
+        IM(alpha_1[k]) = 0;
+    }
+}
+#else
+static void calc_prediction_coef_lp(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][64],
+                                    complex_t *alpha_0, complex_t *alpha_1, real_t *rxx)
 {
     uint8_t k;
     real_t tmp;
@@ -274,21 +472,16 @@ static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][32],
     {
         auto_correlation(sbr, &ac, Xlow, k, sbr->numTimeSlotsRate + 6);

-#ifdef SBR_LOW_POWER
         if (ac.det == 0)
         {
+            RE(alpha_0[k]) = 0;
             RE(alpha_1[k]) = 0;
         } else {
-            tmp = MUL_R(RE(ac.r01), RE(ac.r12)) - MUL_R(RE(ac.r02), RE(ac.r11));
-            RE(alpha_1[k]) = SBR_DIV(tmp, ac.det);
-        }
+            tmp = MUL_R(RE(ac.r01), RE(ac.r22)) - MUL_R(RE(ac.r12), RE(ac.r02));
+            RE(alpha_0[k]) = DIV_R(tmp, (-ac.det));

-        if (RE(ac.r11) == 0)
-        {
-            RE(alpha_0[k]) = 0;
-        } else {
-            tmp = RE(ac.r01) + MUL_R(RE(alpha_1[k]), RE(ac.r12));
-            RE(alpha_0[k]) = -SBR_DIV(tmp, RE(ac.r11));
+            tmp = MUL_R(RE(ac.r01), RE(ac.r12)) - MUL_R(RE(ac.r02), RE(ac.r11));
+            RE(alpha_1[k]) = DIV_R(tmp, ac.det);
         }

         if ((RE(alpha_0[k]) >= REAL_CONST(4)) || (RE(alpha_1[k]) >= REAL_CONST(4)))
@@ -300,84 +493,54 @@ static void calc_prediction_coef(sbr_info *sbr, qmf_t Xlow[MAX_NTSRHFG][32],
         {
         /* reflection coefficient */
         if (RE(ac.r11) == 0)
         {
-            rxx[k] = REAL_CONST(0.0);
+            rxx[k] = COEF_CONST(0.0);
         } else {
-            rxx[k] = -SBR_DIV(RE(ac.r01), RE(ac.r11));
-            if (rxx[k] > REAL_CONST(1.0)) rxx[k] = REAL_CONST(1.0);
-            if (rxx[k] < REAL_CONST(-1.0)) rxx[k] = REAL_CONST(-1.0);
+            rxx[k] = DIV_C(RE(ac.r01), RE(ac.r11));
+            rxx[k] = -rxx[k];
+            if (rxx[k] > COEF_CONST(1.0)) rxx[k] = COEF_CONST(1.0);
+            if (rxx[k] < COEF_CONST(-1.0)) rxx[k] = COEF_CONST(-1.0);
         }
-#else
-        if (ac.det == 0)
-        {
-            RE(alpha_1[k]) = 0;
-            IM(alpha_1[k]) = 0;
-        } else {
-            tmp = REAL_CONST(1.0) / ac.det;
-            RE(alpha_1[k]) = (RE(ac.r01) * RE(ac.r12) - IM(ac.r01) * IM(ac.r12) - RE(ac.r02) * RE(ac.r11)) * tmp;
-            IM(alpha_1[k]) = (IM(ac.r01) * RE(ac.r12) + RE(ac.r01) * IM(ac.r12) - IM(ac.r02) * RE(ac.r11)) * tmp;
-        }
-
-        if (RE(ac.r11) == 0)
-        {
-            RE(alpha_0[k]) = 0;
-            IM(alpha_0[k]) = 0;
-        } else {
-            tmp = 1.0f / RE(ac.r11);
-            RE(alpha_0[k]) = -(RE(ac.r01) + RE(alpha_1[k]) * RE(ac.r12) + IM(alpha_1[k]) * IM(ac.r12)) * tmp;
-            IM(alpha_0[k]) = -(IM(ac.r01) + IM(alpha_1[k]) * RE(ac.r12) - RE(alpha_1[k]) * IM(ac.r12)) * tmp;
-        }
-
-        if ((RE(alpha_0[k])*RE(alpha_0[k]) + IM(alpha_0[k])*IM(alpha_0[k]) >= 16) ||
-            (RE(alpha_1[k])*RE(alpha_1[k]) + IM(alpha_1[k])*IM(alpha_1[k]) >= 16))
-        {
-            RE(alpha_0[k]) = 0;
-            IM(alpha_0[k]) = 0;
-            RE(alpha_1[k]) = 0;
-            IM(alpha_1[k]) = 0;
-        }
-#endif
     }
 }

-#ifdef SBR_LOW_POWER
 static void calc_aliasing_degree(sbr_info *sbr, real_t *rxx, real_t *deg)
 {
     uint8_t k;

-    rxx[0] = REAL_CONST(0.0);
-    deg[1] = REAL_CONST(0.0);
+    rxx[0] = COEF_CONST(0.0);
+    deg[1] = COEF_CONST(0.0);

     for (k = 2; k < sbr->k0; k++)
     {
         deg[k] = 0.0;

-        if ((k % 2 == 0) && (rxx[k] < REAL_CONST(0.0)))
+        if ((k % 2 == 0) && (rxx[k] < COEF_CONST(0.0)))
         {
             if (rxx[k-1] < 0.0)
             {
-                deg[k] = REAL_CONST(1.0);
+                deg[k] = COEF_CONST(1.0);

-                if (rxx[k-2] > REAL_CONST(0.0))
+                if (rxx[k-2] > COEF_CONST(0.0))
                 {
-                    deg[k-1] = REAL_CONST(1.0) - MUL_R(rxx[k-1], rxx[k-1]);
+                    deg[k-1] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
                 }
-            } else if (rxx[k-2] > REAL_CONST(0.0)) {
-                deg[k] = REAL_CONST(1.0) - MUL_R(rxx[k-1], rxx[k-1]);
+            } else if (rxx[k-2] > COEF_CONST(0.0)) {
+                deg[k] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
             }
         }

-        if ((k % 2 == 1) && (rxx[k] > REAL_CONST(0.0)))
+        if ((k % 2 == 1) && (rxx[k] > COEF_CONST(0.0)))
         {
-            if (rxx[k-1] > REAL_CONST(0.0))
+            if (rxx[k-1] > COEF_CONST(0.0))
             {
-                deg[k] = REAL_CONST(1.0);
+                deg[k] = COEF_CONST(1.0);

-                if (rxx[k-2] < REAL_CONST(0.0))
+                if (rxx[k-2] < COEF_CONST(0.0))
                 {
-                    deg[k-1] = REAL_CONST(1.0) - MUL_R(rxx[k-1], rxx[k-1]);
+                    deg[k-1] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
                 }
-            } else if (rxx[k-2] < REAL_CONST(0.0)) {
-                deg[k] = REAL_CONST(1.0) - MUL_R(rxx[k-1], rxx[k-1]);
+            } else if (rxx[k-2] < COEF_CONST(0.0)) {
+                deg[k] = COEF_CONST(1.0) - MUL_C(rxx[k-1], rxx[k-1]);
             }
         }
     }
@@ -440,7 +603,7 @@ static void patch_construction(sbr_info *sbr)
     uint8_t odd, sb;
     uint8_t msb = sbr->k0;
     uint8_t usb = sbr->kx;
-    uint8_t goalSbTab[] = { 21, 23, 43, 46, 64, 85, 93, 128, 0, 0, 0 };
+    uint8_t goalSbTab[] = { 21, 23, 32, 43, 46, 64, 85, 93, 128, 0, 0, 0 };
     /* (uint8_t)(2.048e6/sbr->sample_rate + 0.5); */
     uint8_t goalSb = goalSbTab[get_sr_index(sbr->sample_rate)];
@@ -454,6 +617,15 @@ static void patch_construction(sbr_info *sbr)
         k = sbr->N_master;
     }

+    if (sbr->N_master == 0)
+    {
+        sbr->noPatches = 0;
+        sbr->patchNoSubbands[0] = 0;
+        sbr->patchStartSubband[0] = 0;
+
+        return;
+    }
+
     do
     {
         uint8_t j = k + 1;
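The heart of the patched `hf_generation()` loop is a second-order complex linear predictor: each high-band QMF sample is the copied low-band sample plus contributions from the two previous low-band samples, with the prediction coefficients `alpha_0`/`alpha_1` (from `calc_prediction_coef()`) damped by the chirp factor `bw` and `bw*bw`. The snippet below is a minimal, self-contained float sketch of that recurrence only; it is not FAAD2 code. The `cplx` type, `predict_band()`, and the sample values are invented for illustration, and FAAD2's fixed-point `MUL_R`/`MUL_C` scaling and QMF buffers are deliberately left out.

```c
/* Sketch of the per-band recurrence used by SBR high-frequency generation:
 * xhigh[l] = xlow[l] + (bw*a0)*xlow[l-1] + (bw*bw*a1)*xlow[l-2]
 * All names here are illustrative, not FAAD2 API. */
#include <stdio.h>

typedef struct { float re, im; } cplx;

static cplx cmul(cplx a, cplx b)
{
    cplx r = { a.re * b.re - a.im * b.im, a.re * b.im + a.im * b.re };
    return r;
}

static cplx cadd(cplx a, cplx b)
{
    cplx r = { a.re + b.re, a.im + b.im };
    return r;
}

/* Apply the two-tap complex predictor along one band, starting at l = 2. */
static void predict_band(const cplx *xlow, cplx *xhigh, int n,
                         cplx a0, cplx a1, float bw)
{
    cplx a0bw  = { a0.re * bw,      a0.im * bw      };  /* bw   * alpha_0 */
    cplx a1bw2 = { a1.re * bw * bw, a1.im * bw * bw };  /* bw^2 * alpha_1 */

    for (int l = 2; l < n; l++) {
        cplx acc = xlow[l];                       /* copied low-band sample */
        acc = cadd(acc, cmul(a0bw,  xlow[l - 1]));
        acc = cadd(acc, cmul(a1bw2, xlow[l - 2]));
        xhigh[l] = acc;
    }
}

int main(void)
{
    enum { N = 8 };
    cplx xlow[N], xhigh[N] = { { 0 } };
    cplx a0 = { -0.5f, 0.1f }, a1 = { 0.2f, -0.05f };   /* made-up coefficients */

    for (int l = 0; l < N; l++) {
        xlow[l].re = (float)l;
        xlow[l].im = 0.5f * (float)l;
    }

    predict_band(xlow, xhigh, N, a0, a1, 0.75f);

    for (int l = 2; l < N; l++)
        printf("l=%d  %+.3f %+.3fi\n", l, xhigh[l].re, xhigh[l].im);
    return 0;
}
```

The expanded real/imaginary expressions in the diff's non-low-power loop are exactly this complex multiply-accumulate written out by hand (with the low-power build keeping only the real parts); the new `temp1/temp2/temp3` rotation simply avoids re-reading `Xlow[l-1]` and `Xlow[l-2]` on every iteration.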