Performance improvement of math.h functions by rewriting with AVX intrinsics
I have a simple math library that is linked into a project running on simulator hardware (a 32-bit RTOS); the compiler toolchain is based on a variant of GCC 5.5. The main project code is in Matlab, but the core math operations (cmath functions over array data) have been rewritten in C for performance. Looking at Compiler Explorer, the quality of the optimized code does not look great for GCC 5.5 32-bit (for reference: Clang trunk 32-bit). As I understand it, Clang does a better job of optimizing the loops. An example code snippet:
...
void cfunctionsLog10(unsigned int n, const double* x, double* y) {
    int i;
    for (i = 0; i < n; i++) {
        y[i] = log10(x[i]);
    }
}
and the corresponding assembly generated by GCC 5.5:
cfunctionsLog10(unsigned int, double const*, double*):
push ebp
push edi
push esi
push ebx
sub esp, 12
mov esi, DWORD PTR [esp+32]
mov ebp, DWORD PTR [esp+36]
mov edi, DWORD PTR [esp+40]
test esi, esi
je .L28
xor ebx, ebx
.L27:
sub esp, 8
push DWORD PTR [ebp+4+ebx*8]
push DWORD PTR [ebp+0+ebx*8]
call __log10_finite
fstp QWORD PTR [edi+ebx*8]
add ebx, 1
add esp, 16
cmp ebx, esi
jne .L27
.L28:
add esp, 12
pop ebx
pop esi
pop edi
pop ebp
ret
whereas Clang produces:
cfunctionsLog10(unsigned int, double const*, double*): # @cfunctionsLog10(unsigned int, double const*, double*)
push ebp
push ebx
push edi
push esi
sub esp, 76
mov esi, dword ptr [esp + 96]
test esi, esi
je .LBB2_8
mov edi, dword ptr [esp + 104]
mov ebx, dword ptr [esp + 100]
xor ebp, ebp
cmp esi, 4
jb .LBB2_7
lea eax, [ebx + 8*esi]
cmp eax, edi
jbe .LBB2_4
lea eax, [edi + 8*esi]
cmp eax, ebx
ja .LBB2_7
.LBB2_4:
mov ebp, esi
xor esi, esi
and ebp, -4
.LBB2_5: # =>This Inner Loop Header: Depth=1
vmovsd xmm0, qword ptr [ebx + 8*esi + 16] # xmm0 = mem[0],zero
vmovsd qword ptr [esp], xmm0
vmovsd xmm0, qword ptr [ebx + 8*esi] # xmm0 = mem[0],zero
vmovsd xmm1, qword ptr [ebx + 8*esi + 8] # xmm1 = mem[0],zero
vmovsd qword ptr [esp + 8], xmm0 # 8-byte Spill
vmovsd qword ptr [esp + 16], xmm1 # 8-byte Spill
call log10
fstp tbyte ptr [esp + 64] # 10-byte Folded Spill
vmovsd xmm0, qword ptr [esp + 16] # 8-byte Reload
vmovsd qword ptr [esp], xmm0
call log10
fstp tbyte ptr [esp + 16] # 10-byte Folded Spill
vmovsd xmm0, qword ptr [esp + 8] # 8-byte Reload
vmovsd qword ptr [esp], xmm0
vmovsd xmm0, qword ptr [ebx + 8*esi + 24] # xmm0 = mem[0],zero
vmovsd qword ptr [esp + 8], xmm0 # 8-byte Spill
call log10
vmovsd xmm0, qword ptr [esp + 8] # 8-byte Reload
vmovsd qword ptr [esp], xmm0
fstp qword ptr [esp + 56]
fld tbyte ptr [esp + 16] # 10-byte Folded Reload
fstp qword ptr [esp + 48]
fld tbyte ptr [esp + 64] # 10-byte Folded Reload
fstp qword ptr [esp + 40]
call log10
fstp qword ptr [esp + 32]
vmovsd xmm0, qword ptr [esp + 56] # xmm0 = mem[0],zero
vmovsd xmm1, qword ptr [esp + 40] # xmm1 = mem[0],zero
vmovhps xmm0, xmm0, qword ptr [esp + 48] # xmm0 = xmm0[0,1],mem[0,1]
vmovhps xmm1, xmm1, qword ptr [esp + 32] # xmm1 = xmm1[0,1],mem[0,1]
vmovups xmmword ptr [edi + 8*esi + 16], xmm1
vmovups xmmword ptr [edi + 8*esi], xmm0
add esi, 4
cmp ebp, esi
jne .LBB2_5
mov esi, dword ptr [esp + 96]
cmp ebp, esi
je .LBB2_8
.LBB2_7: # =>This Inner Loop Header: Depth=1
vmovsd xmm0, qword ptr [ebx + 8*ebp] # xmm0 = mem[0],zero
vmovsd qword ptr [esp], xmm0
call log10
fstp qword ptr [edi + 8*ebp]
inc ebp
cmp esi, ebp
jne .LBB2_7
.LBB2_8:
add esp, 76
pop esi
pop edi
pop ebx
pop ebp
ret
Since I cannot use Clang directly, is there any value in rewriting the C source with AVX intrinsics? I believe most of the performance cost comes from the cmath function calls, most of which have no intrinsic implementations.
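For context, here is a minimal sketch of what a hand-written intrinsics version could look like (the function name cfunctionsLog10_avx and the scratch-buffer layout are purely illustrative, not from the project). Plain AVX covers the loads and stores, but it has no log10 instruction, so the transcendental work still ends up as scalar libm calls unless a vector math library supplies one:

#include <immintrin.h>
#include <cmath>

// Illustrative sketch: AVX handles the data movement, but every element
// still goes through a scalar log10 call, so the dominant cost remains.
void cfunctionsLog10_avx(unsigned int n, const double* x, double* y) {
    unsigned int i = 0;
    for (; i + 4 <= n; i += 4) {
        __m256d v = _mm256_loadu_pd(x + i);   // load 4 unaligned doubles
        alignas(32) double tmp[4];
        _mm256_store_pd(tmp, v);              // spill to a scratch buffer
        // Four scalar libm calls per iteration: AVX has no log10 instruction.
        __m256d r = _mm256_set_pd(std::log10(tmp[3]), std::log10(tmp[2]),
                                  std::log10(tmp[1]), std::log10(tmp[0]));
        _mm256_storeu_pd(y + i, r);           // store 4 results
    }
    for (; i < n; ++i)                        // scalar tail
        y[i] = std::log10(x[i]);
}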
Edit:
Using the vectorclass library:
Reimplementation:
#include <cmath>
#include "vectorclass.h"
#include "vectormath_hyp.h"   // VCL hyperbolic functions, including tanh for Vec4d

void vclfunctionsTanh(unsigned int n, const double* x, double* y)
{
    const int N = n;
    const int VectorSize = 4;
    const int FirstPass = N & (-VectorSize);  // round N down to a multiple of 4
    int i = 0;
    // Main loop: process 4 doubles per iteration with the VCL
    for (; i < FirstPass; i += VectorSize)
    {
        Vec4d data;
        data.load(x + i);        // load 4 doubles
        Vec4d ans = tanh(data);  // inline vectorized tanh
        ans.store(y + i);        // store 4 results
    }
    // Scalar tail for the remaining 0-3 elements
    for (; i < N; ++i)
        y[i] = std::tanh(x[i]);
}
The vector class library has inline vectorized versions of common math functions, including log10.
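As an illustration, here is a log10 variant along the same lines as the tanh reimplementation above (the name vclfunctionsLog10 is illustrative; it assumes the VCL's vectormath_exp.h, which provides log10 for Vec4d):

#include <cmath>
#include "vectorclass.h"
#include "vectormath_exp.h"   // VCL exp/log family, assumed to provide log10 for Vec4d

// Same vector/tail split as vclfunctionsTanh, using the VCL's inline log10.
void vclfunctionsLog10(unsigned int n, const double* x, double* y)
{
    const int N = n;
    const int VectorSize = 4;
    const int FirstPass = N & (-VectorSize);  // round N down to a multiple of 4
    int i = 0;
    for (; i < FirstPass; i += VectorSize)
    {
        Vec4d data;
        data.load(x + i);          // load 4 doubles
        Vec4d ans = log10(data);   // inline vectorized log10
        ans.store(y + i);          // store 4 results
    }
    for (; i < N; ++i)             // scalar tail
        y[i] = std::log10(x[i]);
}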