#if (dsps_cplx2real_fc32_ae32_enabled == 1)
    // --- Real-FFT post-processing ("split") stage, Xtensa ESP32 (ae32) ---
    // Combines mirrored bin pairs of the in-place complex FFT held in `data`
    // (interleaved re/im floats) with twiddle factors from `table`, walking
    // one pointer forward from the start and one (`ptr_inv`) backward from
    // the end of the buffer.
    // NOTE(review): this fragment is the interior of
    // dsps_cplx2real_fc32_ae32(); `fft_points` and `ptr_inv` are declared
    // in the function prologue outside this view — confirm their setup
    // against the full source.
    // NOTE(review): the asm statements freely clobber f0-f15 and a-regs via
    // constraints, with no clobber lists; correctness relies on every
    // statement being `asm volatile` so GCC does not reorder or delete them.

    // Twiddle-table stride: lets a table built for a larger FFT be reused
    // for this transform size.
    int wind_step = table_size / (fft_points);
    float *win0 = table + wind_step;        // first twiddle stream
    float *win1 = table + wind_step * 2;    // second stream (double stride)

    // f14 = 0.0 (Xtensa FPU const-table entry 0). Overwritten with 0.5
    // below before any use — appears to be dead / defensive.
    asm volatile ("const.s f14, 0");
    // Load bin 0: f0 = re[0], f1 = im[0].
    asm volatile ("lsi f0, %0, 0" :: "a" (data));
    asm volatile ("lsi f1, %0, 4" :: "a" (data));
    // ptr_inv = data + fft_points*8 bytes = one past the last complex bin
    // (addx8 computes %1*8 + %2); it will walk backwards over the
    // mirrored half of the spectrum.
    asm volatile ("addx8 %0, %1, %2" : "=a" (ptr_inv) : "a" (fft_points), "a" (data));
    // Bin 0 combine: f6 = re+im, f7 = re-im.
    // NOTE(review): per the usual real-FFT split convention this packs the
    // DC and Nyquist values into the re/im slots of bin 0 — confirm.
    asm volatile ("add.s f6, f0, f1");
    asm volatile ("sub.s f7, f0, f1");
    // Loop trip count = fft_points/4: each iteration processes two forward
    // bins (16 bytes) and their two mirrored partners.
    asm volatile ("srli %0, %0, 2" :
    "+a" (fft_points));
    // f14 = 0.5 (FPU const-table entry 3), f15 = -0.5: final scale factors.
    asm volatile ("const.s f14, 3");
    asm volatile ("neg.s f15, f14");
    // Store the combined bin-0 pair back in place.
    asm volatile ("ssi f6, %0, 0" :: "a" (data));
    asm volatile ("ssi f7, %0, 4" :: "a" (data));
    // Step back so ptr_inv[0..12] addresses the last two complex bins.
    asm volatile ("addi %0, %0, -16" : "+a" (ptr_inv));

    // Zero-overhead hardware loop (skipped entirely if the count is 0).
    asm volatile ("loopnez %0, __loop_end_fftr_real_post_proc" :: "a" (fft_points));
    // First pair:  forward bin  f0 = re[k],   f1 = im[k]   (data + 8/12)
    //              mirrored bin f2 = re',     f3 = im'     (ptr_inv + 8/12)
    asm volatile ("lsi f1, %0, 12" :: "a" (data));
    asm volatile ("lsi f3, %0, 12" :: "a" (ptr_inv));
    asm volatile ("lsi f0, %0, 8" :: "a" (data));
    asm volatile ("lsi f2, %0, 8" :: "a" (ptr_inv));
    // Second pair: forward f6/f7 (data + 16/20), mirrored f8/f9 (ptr_inv + 0/4).
    asm volatile ("lsi f7, %0, 20" :: "a" (data));
    asm volatile ("lsi f9, %0, 4" :: "a" (ptr_inv));
    asm volatile ("lsi f6, %0, 16" :: "a" (data));
    asm volatile ("lsi f8, %0, 0" :: "a" (ptr_inv));
    // Even/odd decomposition of each bin pair:
    //   f5/f11 = im - im' (diffs), f4/f10 = re + re' (sums).
    asm volatile ("sub.s f5, f1, f3");
    asm volatile ("add.s f4, f0, f2");
    asm volatile ("sub.s f11, f7, f9");
    asm volatile ("add.s f10, f6, f8");
    //   f1/f7 = im + im' (sums), f0/f6 = re - re' (diffs).
    asm volatile ("add.s f1, f1, f3");
    asm volatile ("sub.s f0, f0, f2");
    // Twiddle loads are interleaved with the arithmetic to hide latency.
    asm volatile ("lsi f12, %0, 4" :: "a" (win0));
    asm volatile ("add.s f7, f7, f9");
    asm volatile ("sub.s f6, f6, f8");
    asm volatile ("lsi f13, %0, 4" :: "a" (win1));
    // Complex multiply of (re-diff + j*im-sum) by the twiddle factor,
    // finished with fused madd/msub:
    //   f3 = im*w1 + re*w0,  f2 = re*w1 - im*w0   (and f9/f8 likewise).
    asm volatile ("mul.s f3, f1, f12");
    asm volatile ("mul.s f2, f0, f12");
    asm volatile ("lsi f12, %0, 0" :: "a" (win0));
    asm volatile ("mul.s f9, f7, f13");
    asm volatile ("mul.s f8, f6, f13");
    asm volatile ("lsi f13, %0, 0" :: "a" (win1));
    asm volatile ("madd.s f3, f0, f12");
    asm volatile ("msub.s f2, f1, f12");
    asm volatile ("madd.s f9, f6, f13");
    asm volatile ("msub.s f8, f7, f13");
    // Advance both twiddle pointers by wind_step complex values
    // (addx8: ptr = wind_step*8 + ptr).
    asm volatile ("addx8 %0, %1, %0" : "+a" (win0) : "a" (wind_step));
    asm volatile ("addx8 %0, %1, %0" : "+a" (win1) : "a" (wind_step));
    // Recombine: forward bin = (even - twiddled), mirrored bin = (even + twiddled).
    asm volatile ("sub.s f1, f5, f3");
    asm volatile ("sub.s f0, f4, f2");
    asm volatile ("add.s f3, f3, f5");
    asm volatile ("add.s f2, f4, f2");
    asm volatile ("sub.s f7, f11, f9");
    asm volatile ("sub.s f6, f10, f8");
    asm volatile ("add.s f9, f9, f11");
    asm volatile ("add.s f8, f10, f8");
    // Scale everything by 0.5 (f14); the mirrored imaginary parts by
    // -0.5 (f15) — the sign flip reflects the conjugate symmetry of a
    // real signal's spectrum.
    asm volatile ("mul.s f1, f1, f14");
    asm volatile ("mul.s f0, f0, f14");
    asm volatile ("mul.s f3, f3, f15");
    asm volatile ("mul.s f2, f2, f14");
    asm volatile ("mul.s f7, f7, f14");
    asm volatile ("mul.s f6, f6, f14");
    asm volatile ("mul.s f9, f9, f15");
    asm volatile ("mul.s f8, f8, f14");
    // Write the two forward bins back in place ...
    asm volatile ("ssi f1, %0, 12" :: "a" (data));
    asm volatile ("ssi f0, %0, 8" :: "a" (data));
    asm volatile ("ssi f3, %0, 12" :: "a" (ptr_inv));
    asm volatile ("ssi f2, %0, 8" :: "a" (ptr_inv));
    asm volatile ("ssi f7, %0, 20" :: "a" (data));
    asm volatile ("ssi f6, %0, 16" :: "a" (data));
    // ... advance the forward pointer by two complex bins ...
    asm volatile ("addi %0, %0, 16" : "+a" (
    data));
    // ... write the two mirrored bins and retreat the inverse pointer.
    asm volatile ("ssi f9, %0, 4" :: "a" (ptr_inv));
    asm volatile ("ssi f8, %0, 0" :: "a" (ptr_inv));
    asm volatile ("addi %0, %0, -16" : "+a" (ptr_inv));
    // End label of the loopnez hardware loop.
    asm volatile ("__loop_end_fftr_real_post_proc: nop");
esp_err_t dsps_cplx2real_fc32_ae32_(float *data, int N, float *table, int table_size)
static float data[128 *2]