File: | libs/spandsp/src/gsm0610_rpe.c |
Location: | line 245, column 5 |
Description: | Value stored to 'EM' is never read |
1 | /* |
2 | * SpanDSP - a series of DSP components for telephony |
3 | * |
4 | * gsm0610_rpe.c - GSM 06.10 full rate speech codec. |
5 | * |
6 | * Written by Steve Underwood <steveu@coppice.org> |
7 | * |
8 | * Copyright (C) 2006 Steve Underwood |
9 | * |
10 | * All rights reserved. |
11 | * |
12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU Lesser General Public License version 2.1, |
14 | * as published by the Free Software Foundation. |
15 | * |
16 | * This program is distributed in the hope that it will be useful, |
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
19 | * GNU Lesser General Public License for more details. |
20 | * |
21 | * You should have received a copy of the GNU Lesser General Public |
22 | * License along with this program; if not, write to the Free Software |
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
24 | * |
25 | * This code is based on the widely used GSM 06.10 code available from |
26 | * http://kbs.cs.tu-berlin.de/~jutta/toast.html |
27 | */ |
28 | |
29 | /*! \file */ |
30 | |
31 | #if defined(HAVE_CONFIG_H) |
32 | #include "config.h" |
33 | #endif |
34 | |
35 | #include <assert.h> |
36 | #include <inttypes.h> |
37 | #if defined(HAVE_TGMATH_H) |
38 | #include <tgmath.h> |
39 | #endif |
40 | #if defined(HAVE_MATH_H) |
41 | #include <math.h> |
42 | #endif |
43 | #include "floating_fudge.h" |
44 | #include <stdlib.h> |
45 | |
46 | #include "mmx_sse_decs.h" |
47 | |
48 | #include "spandsp/telephony.h" |
49 | #include "spandsp/fast_convert.h" |
50 | #include "spandsp/bitstream.h" |
51 | #include "spandsp/saturated.h" |
52 | #include "spandsp/gsm0610.h" |
53 | |
54 | #include "gsm0610_local.h" |
55 | |
56 | /* 4.2.13 .. 4.2.17 RPE ENCODING SECTION */ |
57 | |
58 | /* 4.2.13 */ |
59 | static void weighting_filter(int16_t x[40], |
60 | const int16_t *e) /* signal [-5..0.39.44] IN */ |
61 | { |
62 | #if defined(__GNUC__) && defined(SPANDSP_USE_MMX) && defined(__x86_64__) && !defined(__OpenBSD__) |
63 | /* Table 4.4 Coefficients of the weighting filter */ |
64 | /* This must be padded to a multiple of 4 for MMX to work */ |
65 | static const union |
66 | { |
67 | int16_t gsm_H[12]; |
68 | __m64 x[3]; |
69 | } gsm_H = |
70 | { |
71 | { |
72 | -134, -374, 0, 2054, 5741, 8192, 5741, 2054, 0, -374, -134, 0 |
73 | } |
74 | |
75 | }; |
76 | |
77 | __asm__ __volatile__( |
78 | " emms;\n" |
79 | " addq $-10,%%rcx;\n" |
80 | " leaq %[gsm_H],%%rax;\n" |
81 | " movq (%%rax),%%mm1;\n" |
82 | " movq 8(%%rax),%%mm2;\n" |
83 | " movq 16(%%rax),%%mm3;\n" |
84 | " movq $0x1000,%%rax;\n" |
85 | " movq %%rax,%%mm5;\n" /* For rounding */ |
86 | " xorq %%rsi,%%rsi;\n" |
87 | " .p2align 2;\n" |
88 | "1:\n" |
89 | " movq (%%rcx,%%rsi,2),%%mm0;\n" |
90 | " pmaddwd %%mm1,%%mm0;\n" |
91 | |
92 | " movq 8(%%rcx,%%rsi,2),%%mm4;\n" |
93 | " pmaddwd %%mm2,%%mm4;\n" |
94 | " paddd %%mm4,%%mm0;\n" |
95 | |
96 | " movq 16(%%rcx,%%rsi,2),%%mm4;\n" |
97 | " pmaddwd %%mm3,%%mm4;\n" |
98 | " paddd %%mm4,%%mm0;\n" |
99 | |
100 | " movq %%mm0,%%mm4;\n" |
101 | " punpckhdq %%mm0,%%mm4;\n" /* mm4 has high int32 of mm0 dup'd */ |
102 | " paddd %%mm4,%%mm0;\n" |
103 | |
104 | " paddd %%mm5,%%mm0;\n" /* Add for roundoff */ |
105 | " psrad $13,%%mm0;\n" |
106 | " packssdw %%mm0,%%mm0;\n" |
107 | " movd %%mm0,%%eax;\n" /* eax has result */ |
108 | " movw %%ax,(%%rdi,%%rsi,2);\n" |
109 | " incq %%rsi;\n" |
110 | " cmpq $39,%%rsi;\n" |
111 | " jle 1b;\n" |
112 | " emms;\n" |
113 | : |
114 | : "c" (e), "D" (x), [gsm_H] "X" (gsm_H) |
115 | : "rax", "rdx", "rsi", "memory" |
116 | ); |
117 | #elif defined(__GNUC__) && defined(SPANDSP_USE_MMX) && defined(__i386__) |
118 | /* Table 4.4 Coefficients of the weighting filter */ |
119 | /* This must be padded to a multiple of 4 for MMX to work */ |
120 | static const union |
121 | { |
122 | int16_t gsm_H[12]; |
123 | __m64 x[3]; |
124 | } gsm_H = |
125 | { |
126 | { |
127 | -134, -374, 0, 2054, 5741, 8192, 5741, 2054, 0, -374, -134, 0 |
128 | } |
129 | |
130 | }; |
131 | |
132 | __asm__ __volatile__( |
133 | " emms;\n" |
134 | " addl $-10,%%ecx;\n" |
135 | " leal %[gsm_H],%%eax;\n" |
136 | " movq (%%eax),%%mm1;\n" |
137 | " movq 8(%%eax),%%mm2;\n" |
138 | " movq 16(%%eax),%%mm3;\n" |
139 | " movl $0x1000,%%eax;\n" |
140 | " movd %%eax,%%mm5;\n" /* For rounding */ |
141 | " xorl %%esi,%%esi;\n" |
142 | " .p2align 2;\n" |
143 | "1:\n" |
144 | " movq (%%ecx,%%esi,2),%%mm0;\n" |
145 | " pmaddwd %%mm1,%%mm0;\n" |
146 | |
147 | " movq 8(%%ecx,%%esi,2),%%mm4;\n" |
148 | " pmaddwd %%mm2,%%mm4;\n" |
149 | " paddd %%mm4,%%mm0;\n" |
150 | |
151 | " movq 16(%%ecx,%%esi,2),%%mm4;\n" |
152 | " pmaddwd %%mm3,%%mm4;\n" |
153 | " paddd %%mm4,%%mm0;\n" |
154 | |
155 | " movq %%mm0,%%mm4;\n" |
156 | " punpckhdq %%mm0,%%mm4;\n" /* mm4 has high int32 of mm0 dup'd */ |
157 | " paddd %%mm4,%%mm0;\n" |
158 | |
159 | " paddd %%mm5,%%mm0;\n" /* Add for roundoff */ |
160 | " psrad $13,%%mm0;\n" |
161 | " packssdw %%mm0,%%mm0;\n" |
162 | " movd %%mm0,%%eax;\n" /* eax has result */ |
163 | " movw %%ax,(%%edi,%%esi,2);\n" |
164 | " incl %%esi;\n" |
165 | " cmpl $39,%%esi;\n" |
166 | " jle 1b;\n" |
167 | " emms;\n" |
168 | : |
169 | : "c" (e), "D" (x), [gsm_H] "X" (gsm_H) |
170 | : "eax", "edx", "esi", "memory" |
171 | ); |
172 | #else |
173 | int32_t result; |
174 | int k; |
175 | |
176 | /* The coefficients of the weighting filter are stored in a table |
177 | (see table 4.4). The following scaling is used: |
178 | |
179 | H[0..10] = integer(real_H[0..10] * 8192); |
180 | */ |
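/* (Illustrative note: with this Q13 scaling the centre tap 8192 corresponds to a real coefficient of 1.0, 5741 to roughly 0.70, and -374 to roughly -0.046.) */ |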
181 | /* Initialization of a temporary working array wt[0...49] */ |
182 | |
183 | /* for (k = 0; k <= 4; k++) wt[k] = 0; |
184 | * for (k = 5; k <= 44; k++) wt[k] = *e++; |
185 | * for (k = 45; k <= 49; k++) wt[k] = 0; |
186 | * |
187 | * (e[-5..-1] and e[40..44] are allocated by the caller, |
188 | * are initially zero and are not written anywhere.) |
189 | */ |
190 | e -= 5; |
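/* (After this shift, e[k + i] for i = 0..10 indexes the zero-padded window wt[k..k + 10] described above, so no separate wt[] array is needed.) */ |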
191 | |
192 | /* Compute the signal x[0..39] */ |
193 | for (k = 0; k < 40; k++) |
194 | { |
195 | result = 8192 >> 1; |
196 | |
197 | /* for (i = 0; i <= 10; i++) |
198 | * { |
199 | * temp = sat_mul16_32(wt[k + i], gsm_H[i]); |
200 | * result = sat_add32(result, temp); |
201 | * } |
202 | */ |
203 | |
204 | #undef STEP |
205 | #define STEP(i,H) (e[k + i] * (int32_t) H) |
206 | |
207 | /* Every one of these multiplications is done twice, |
208 | but I don't see an elegant way to optimize this. |
209 | Do you? |
210 | */ |
211 | result += STEP( 0, -134); |
212 | result += STEP( 1, -374); |
213 | /* += STEP( 2, 0 ); */ |
214 | result += STEP( 3, 2054); |
215 | result += STEP( 4, 5741); |
216 | result += STEP( 5, 8192); |
217 | result += STEP( 6, 5741); |
218 | result += STEP( 7, 2054); |
219 | /* += STEP( 8, 0 ); */ |
220 | result += STEP( 9, -374); |
221 | result += STEP(10, -134); |
222 | |
223 | /* 2 adds vs. >> 16 => 14, minus one shift to compensate for |
224 | those we lost when replacing L_MULT by '*'. */ |
225 | result >>= 13; |
226 | x[k] = saturate16(result); |
227 | } |
228 | /*endfor*/ |
229 | #endif |
230 | } |
231 | /*- End of function --------------------------------------------------------*/ |
232 | |
233 | /* 4.2.14 */ |
234 | static void rpe_grid_selection(int16_t x[40], int16_t xM[13], int16_t *Mc_out) |
235 | { |
236 | int i; |
237 | int32_t L_result; |
238 | int32_t L_temp; |
239 | int32_t EM; /* xxx should be L_EM? */ |
240 | int16_t Mc; |
241 | int32_t L_common_0_3; |
242 | |
243 | /* The signal x[0..39] is used to select the RPE grid which is |
244 | represented by Mc. */ |
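/* (In outline: the grid Mc in 0..3 whose sub-sequence x[Mc], x[Mc + 3], ..., x[Mc + 36] has the largest energy is selected; the energy is computed below as twice the sum of squared quarter-amplitude samples (x >> 2).) */ |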
245 | EM = 0; |
Value stored to 'EM' is never read | |
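/* (The value stored here is overwritten unconditionally at line 273, "EM = L_result;", before EM is first read at line 292, so this initialisation is a dead store.) */ |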
246 | Mc = 0; |
247 | |
248 | #undef STEP |
249 | #define STEP(m,i) \ |
250 | L_temp = x[m + 3*i] >> 2; \ |
251 | L_result += L_temp*L_temp; |
252 | |
253 | /* Common part of 0 and 3 */ |
254 | L_result = 0; |
255 | STEP(0, 1); |
256 | STEP(0, 2); |
257 | STEP(0, 3); |
258 | STEP(0, 4); |
259 | STEP(0, 5); |
260 | STEP(0, 6); |
261 | STEP(0, 7); |
262 | STEP(0, 8); |
263 | STEP(0, 9); |
264 | STEP(0, 10); |
265 | STEP(0, 11); |
266 | STEP(0, 12); |
267 | L_common_0_3 = L_result; |
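/* (The terms x[3], x[6], ..., x[36] belong to both grid 0 and grid 3, since 0 + 3*i for i = 1..12 equals 3 + 3*(i - 1); that partial sum is therefore computed once here and reused for i = 3 below.) */ |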
268 | |
269 | /* i = 0 */ |
270 | |
271 | STEP(0, 0); |
272 | L_result <<= 1; /* implicit in L_MULT */ |
273 | EM = L_result; |
274 | |
275 | /* i = 1 */ |
276 | |
277 | L_result = 0; |
278 | STEP(1, 0); |
279 | STEP(1, 1); |
280 | STEP(1, 2); |
281 | STEP(1, 3); |
282 | STEP(1, 4); |
283 | STEP(1, 5); |
284 | STEP(1, 6); |
285 | STEP(1, 7); |
286 | STEP(1, 8); |
287 | STEP(1, 9); |
288 | STEP(1, 10); |
289 | STEP(1, 11); |
290 | STEP(1, 12); |
291 | L_result <<= 1; |
292 | if (L_result > EM) |
293 | { |
294 | Mc = 1; |
295 | EM = L_result; |
296 | } |
297 | /*endif*/ |
298 | |
299 | /* i = 2 */ |
300 | |
301 | L_result = 0; |
302 | STEP(2, 0); |
303 | STEP(2, 1); |
304 | STEP(2, 2); |
305 | STEP(2, 3); |
306 | STEP(2, 4); |
307 | STEP(2, 5); |
308 | STEP(2, 6); |
309 | STEP(2, 7); |
310 | STEP(2, 8); |
311 | STEP(2, 9); |
312 | STEP(2, 10); |
313 | STEP(2, 11); |
314 | STEP(2, 12); |
315 | L_result <<= 1; |
316 | if (L_result > EM) |
317 | { |
318 | Mc = 2; |
319 | EM = L_result; |
320 | } |
321 | /*endif*/ |
322 | |
323 | /* i = 3 */ |
324 | |
325 | L_result = L_common_0_3; |
326 | STEP(3, 12); |
327 | L_result <<= 1; |
328 | if (L_result > EM) |
329 | Mc = 3; |
330 | /*endif*/ |
331 | |
332 | /* Down-sampling by a factor 3 to get the selected xM[0..12] |
333 | RPE sequence. */ |
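/* (For example, Mc = 2 selects xM[] = x[2], x[5], x[8], ..., x[38].) */ |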
334 | for (i = 0; i < 13; i++) |
335 | xM[i] = x[Mc + 3*i]; |
336 | /*endfor*/ |
337 | *Mc_out = Mc; |
338 | } |
339 | /*- End of function --------------------------------------------------------*/ |
340 | |
341 | /* 4.2.15 */ |
342 | static void apcm_quantization_xmaxc_to_exp_mant(int16_t xmaxc, |
343 | int16_t *exp_out, |
344 | int16_t *mant_out) |
345 | { |
346 | int16_t exp; |
347 | int16_t mant; |
348 | |
349 | /* Compute exponent and mantissa of the decoded version of xmaxc */ |
350 | exp = 0; |
351 | if (xmaxc > 15) |
352 | exp = (int16_t) ((xmaxc >> 3) - 1); |
353 | /*endif*/ |
354 | mant = xmaxc - (exp << 3); |
355 | |
356 | if (mant == 0) |
357 | { |
358 | exp = -4; |
359 | mant = 7; |
360 | } |
361 | else |
362 | { |
363 | while (mant <= 7) |
364 | { |
365 | mant = (int16_t) (mant << 1 | 1); |
366 | exp--; |
367 | } |
368 | /*endwhile*/ |
369 | mant -= 8; |
370 | } |
371 | /*endif*/ |
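/* (Worked example: xmaxc = 50 gives exp = (50 >> 3) - 1 = 5 and mant = 50 - 40 = 10; mant is already above 7, so the loop is skipped and mant - 8 = 2, leaving exp = 5, mant = 2.) */ |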
372 | |
373 | assert(exp >= -4 && exp <= 6); |
374 | assert(mant >= 0 && mant <= 7); |
375 | |
376 | *exp_out = exp; |
377 | *mant_out = mant; |
378 | } |
379 | /*- End of function --------------------------------------------------------*/ |
380 | |
381 | static void apcm_quantization(int16_t xM[13], |
382 | int16_t xMc[13], |
383 | int16_t *mant_out, |
384 | int16_t *exp_out, |
385 | int16_t *xmaxc_out) |
386 | { |
387 | /* Table 4.5 Normalized inverse mantissa used to compute xM/xmax */ |
388 | static const int16_t gsm_NRFAC[8] = |
389 | { |
390 | 29128, 26215, 23832, 21846, 20165, 18725, 17476, 16384 |
391 | }; |
392 | int i; |
393 | int itest; |
394 | int16_t xmax; |
395 | int16_t xmaxc; |
396 | int16_t temp; |
397 | int16_t temp1; |
398 | int16_t temp2; |
399 | int16_t exp; |
400 | int16_t mant; |
401 | |
402 | /* Find the maximum absolute value xmax of xM[0..12]. */ |
403 | xmax = 0; |
404 | for (i = 0; i < 13; i++) |
405 | { |
406 | temp = xM[i]; |
407 | temp = sat_abs16(temp); |
408 | if (temp > xmax) |
409 | xmax = temp; |
410 | /*endif*/ |
411 | } |
412 | /*endfor*/ |
413 | |
414 | /* Quantizing and coding of xmax to get xmaxc. */ |
415 | exp = 0; |
416 | temp = xmax >> 9; |
417 | itest = 0; |
418 | |
419 | for (i = 0; i <= 5; i++) |
420 | { |
421 | itest |= (temp <= 0); |
422 | temp >>= 1; |
423 | |
424 | assert(exp <= 5); |
425 | if (itest == 0) |
426 | exp++; |
427 | /*endif*/ |
428 | } |
429 | /*endfor*/ |
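/* (At this point exp counts the significant bits of xmax above bit 8: it stays 0 while xmax < 512 and reaches 6 for a full-scale 15 bit xmax.) */ |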
430 | |
431 | assert(exp <= 6 && exp >= 0); |
432 | temp = (int16_t) (exp + 5); |
433 | |
434 | assert(temp <= 11 && temp >= 0); |
435 | xmaxc = sat_add16((xmax >> temp), exp << 3); |
436 | |
437 | /* Quantizing and coding of the xM[0..12] RPE sequence |
438 | to get the xMc[0..12] */ |
439 | apcm_quantization_xmaxc_to_exp_mant(xmaxc, &exp, &mant); |
440 | |
441 | /* This computation uses the fact that the decoded version of xmaxc |
442 | can be calculated by using the exponent and the mantissa part of |
443 | xmaxc (logarithmic table). |
444 | So, this method avoids any division and uses only a scaling |
445 | of the RPE samples by a function of the exponent. A direct |
446 | multiplication by the inverse of the mantissa (NRFAC[0..7] |
447 | found in table 4.5) gives the 3 bit coded version xMc[0..12] |
448 | of the RPE samples. |
449 | */ |
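/* (The two mantissa tables are scaled reciprocals of one another: gsm_NRFAC[m] * gsm_fac[m] is roughly 2^29 for every m, which is what allows the division by the decoded xmax to be replaced by the multiply-and-shift below.) */ |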
450 | /* Direct computation of xMc[0..12] using table 4.5 */ |
451 | assert(exp <= 4096 && exp >= -4096); |
452 | assert(mant >= 0 && mant <= 7); |
453 | |
454 | temp1 = (int16_t) (6 - exp); /* Normalization by the exponent */ |
455 | temp2 = gsm_NRFAC[mant]; /* Inverse mantissa */ |
456 | |
457 | for (i = 0; i < 13; i++) |
458 | { |
459 | assert(temp1 >= 0 && temp1 < 16); |
460 | |
461 | temp = xM[i] << temp1; |
462 | temp = sat_mul16(temp, temp2); |
463 | temp >>= 12; |
464 | xMc[i] = (int16_t) (temp + 4); /* See note below */ |
465 | } |
466 | /*endfor*/ |
467 | |
468 | /* NOTE: This equation is used to make all the xMc[i] positive. */ |
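/* (Before the offset, temp is a signed 3 bit value in the range -4..3; adding 4 maps it onto the unsigned codes 0..7 that the assert in apcm_inverse_quantization() expects.) */ |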
469 | *mant_out = mant; |
470 | *exp_out = exp; |
471 | *xmaxc_out = xmaxc; |
472 | } |
473 | /*- End of function --------------------------------------------------------*/ |
474 | |
475 | /* 4.2.16 */ |
476 | static void apcm_inverse_quantization(int16_t xMc[13], |
477 | int16_t mant, |
478 | int16_t exp, |
479 | int16_t xMp[13]) |
480 | { |
481 | /* Table 4.6 Normalized direct mantissa used to compute xM/xmax */ |
482 | static const int16_t gsm_fac[8] = |
483 | { |
484 | 18431, 20479, 22527, 24575, 26623, 28671, 30719, 32767 |
485 | }; |
486 | int i; |
487 | int16_t temp; |
488 | int16_t temp1; |
489 | int16_t temp2; |
490 | int16_t temp3; |
491 | |
492 | /* This part is for decoding the RPE sequence of coded xMc[0..12] |
493 | samples to obtain the xMp[0..12] array. Table 4.6 is used to get |
494 | the mantissa of xmaxc (FAC[0..7]). |
495 | */ |
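/* (For each sample, the step "(xMc[i] << 1) - 7" below maps the 3 bit codes 0..7 back onto the odd levels -7, -5, ..., +5, +7 before the mantissa and exponent scaling is applied.) */ |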
496 | #if 0 |
497 | assert(mant >= 0 && mant <= 7); |
498 | #endif |
499 | |
500 | temp1 = gsm_fac[mant]; /* See 4.2-15 for mant */ |
501 | temp2 = sat_sub16(6, exp); /* See 4.2-15 for exp */ |
502 | temp3 = gsm_asl(1, sat_sub16(temp2, 1)); |
503 | |
504 | for (i = 0; i < 13; i++) |
505 | { |
506 | assert(xMc[i] >= 0 && xMc[i] <= 7); /* 3 bit unsigned */ |
507 | |
508 | temp = (int16_t) ((xMc[i] << 1) - 7); /* Restore sign */ |
509 | assert(temp <= 7 && temp >= -7); /* 4 bit signed */ |
510 | |
511 | temp <<= 12; /* 16 bit signed */ |
512 | temp = gsm_mult_r(temp1, temp); |
513 | temp = sat_add16(temp, temp3); |
514 | xMp[i] = gsm_asr(temp, temp2); |
515 | } |
516 | /*endfor*/ |
517 | } |
518 | /*- End of function --------------------------------------------------------*/ |
519 | |
520 | /* 4.2.17 */ |
521 | static void rpe_grid_positioning(int16_t Mc, |
522 | int16_t xMp[13], |
523 | int16_t ep[40]) |
524 | { |
525 | int i = 13; |
526 | |
527 | /* This procedure computes the reconstructed long term residual signal |
528 | ep[0..39] for the LTP analysis filter. The inputs are the Mc |
529 | which is the grid position selection and the xMp[0..12] decoded |
530 | RPE samples which are upsampled by a factor of 3 by inserting zero |
531 | values. |
532 | */ |
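/* (The Duff's-device style switch below interleaves the samples so that ep[Mc + 3*k] = xMp[k] for k = 0..12 and every other ep[] position is zero; e.g. Mc = 1 produces 0, xMp[0], 0, 0, xMp[1], 0, ...) */ |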
533 | assert(0 <= Mc && Mc <= 3); |
534 | |
535 | switch (Mc) |
536 | { |
537 | case 3: |
538 | *ep++ = 0; |
539 | case 2: |
540 | do |
541 | { |
542 | *ep++ = 0; |
543 | case 1: |
544 | *ep++ = 0; |
545 | case 0: |
546 | *ep++ = *xMp++; |
547 | } |
548 | while (--i); |
549 | } |
550 | /*endswitch*/ |
551 | while (++Mc < 4) |
552 | *ep++ = 0; |
553 | /*endwhile*/ |
554 | } |
555 | /*- End of function --------------------------------------------------------*/ |
556 | |
557 | void gsm0610_rpe_encoding(gsm0610_state_t *s, |
558 | int16_t *e, // [-5..-1][0..39][40..44] |
559 | int16_t *xmaxc, |
560 | int16_t *Mc, |
561 | int16_t xMc[13]) |
562 | { |
563 | int16_t x[40]; |
564 | int16_t xM[13]; |
565 | int16_t xMp[13]; |
566 | int16_t mant; |
567 | int16_t exp; |
568 | |
569 | weighting_filter(x, e); |
570 | rpe_grid_selection(x, xM, Mc); |
571 | |
572 | apcm_quantization(xM, xMc, &mant, &exp, xmaxc); |
573 | apcm_inverse_quantization(xMc, mant, exp, xMp); |
574 | |
575 | rpe_grid_positioning(*Mc, xMp, e); |
576 | } |
577 | /*- End of function --------------------------------------------------------*/ |
578 | |
579 | void gsm0610_rpe_decoding(gsm0610_state_t *s, |
580 | int16_t xmaxc, |
581 | int16_t Mcr, |
582 | int16_t xMcr[13], |
583 | int16_t erp[40]) |
584 | { |
585 | int16_t exp; |
586 | int16_t mant; |
587 | int16_t xMp[13]; |
588 | |
589 | apcm_quantization_xmaxc_to_exp_mant(xmaxc, &exp, &mant); |
590 | apcm_inverse_quantization(xMcr, mant, exp, xMp); |
591 | rpe_grid_positioning(Mcr, xMp, erp); |
592 | } |
593 | /*- End of function --------------------------------------------------------*/ |
594 | /*- End of file ------------------------------------------------------------*/ |