SSE vector wrapper type performance compared to bare __m128

I found an interesting Gamasutra article about SIMD pitfalls, which states that it is not possible to reach the performance of the "pure" __m128 type with wrapper types. Well, I was skeptical, so I downloaded the project files and fabricated a comparable test case.

It turned out (to my surprise) that the wrapper version is significantly slower. Since I don't want to talk about just thin air, the test cases are the following:

In the 1st case Vec4 is a simple alias of the __m128 type, with some operators:

#include <xmmintrin.h>
#include <emmintrin.h>

using Vec4 = __m128;

inline __m128 VLoad(float f)
{
    return _mm_set_ps(f, f, f, f);
};

inline Vec4& operator+=(Vec4 &va, Vec4 vb)
{
    return (va = _mm_add_ps(va, vb));
};

inline Vec4& operator*=(Vec4 &va, Vec4 vb)
{
    return (va = _mm_mul_ps(va, vb));
};

inline Vec4 operator+(Vec4 va, Vec4 vb)
{
    return _mm_add_ps(va, vb);
};

inline Vec4 operator-(Vec4 va, Vec4 vb)
{
    return _mm_sub_ps(va, vb);
};

inline Vec4 operator*(Vec4 va, Vec4 vb)
{
    return _mm_mul_ps(va, vb);
};

In the 2nd case Vec4 is a lightweight wrapper around __m128. It is not a complete wrapper, just a short sketch which covers the issue. The operators wrap exactly the same intrinsics, the only difference is (since 16-byte alignment cannot be applied on arguments) that they take Vec4 by const reference:

#include <xmmintrin.h>
#include <emmintrin.h>

struct Vec4
{
    __m128 simd;

    inline Vec4() = default;
    inline Vec4(const Vec4&) = default;
    inline Vec4& operator=(const Vec4&) = default;

    inline Vec4(__m128 s)
        : simd(s)
    {}

    inline operator __m128() const
    {
        return simd;
    }

    inline operator __m128&()
    {
        return simd;
    }
};

inline __m128 VLoad(float f)
{
    return _mm_set_ps(f, f, f, f);
};

inline Vec4 VAdd(const Vec4 &va, const Vec4 &vb)
{
    return _mm_add_ps(va, vb);
    // return _mm_add_ps(va.simd, vb.simd); // doesn't make difference
};

inline Vec4 VSub(const Vec4 &va, const Vec4 &vb)
{
    return _mm_sub_ps(va, vb);
    // return _mm_sub_ps(va.simd, vb.simd); // doesn't make difference
};

inline Vec4 VMul(const Vec4 &va, const Vec4 &vb)
{
    return _mm_mul_ps(va, vb);
    // return _mm_mul_ps(va.simd, vb.simd); // doesn't make difference
};
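
The test kernel below uses the overloaded operators directly, so the wrapper case also needs operator overloads analogous to the alias version for the same code to compile. They are not part of the excerpt above; a minimal sketch of them, wrapping the same intrinsics and taking the operands by const reference as described, could look like this:

// Assumed operator overloads for the wrapper type (not from the original
// project): they forward to the same intrinsics as the alias version and
// take their operands by const reference.
inline Vec4& operator+=(Vec4 &va, const Vec4 &vb)
{
    va.simd = _mm_add_ps(va.simd, vb.simd);
    return va;
}

inline Vec4& operator*=(Vec4 &va, const Vec4 &vb)
{
    va.simd = _mm_mul_ps(va.simd, vb.simd);
    return va;
}

inline Vec4 operator+(const Vec4 &va, const Vec4 &vb)
{
    return _mm_add_ps(va.simd, vb.simd);
}

inline Vec4 operator-(const Vec4 &va, const Vec4 &vb)
{
    return _mm_sub_ps(va.simd, vb.simd);
}

inline Vec4 operator*(const Vec4 &va, const Vec4 &vb)
{
    return _mm_mul_ps(va.simd, vb.simd);
}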

Here is the test kernel, which produces different performance with the different versions of Vec4:

#include <xmmintrin.h>
#include <emmintrin.h>

struct EQSTATE
{
    // Filter #1 (Low band)

    Vec4  lf;       // Frequency
    Vec4  f1p0;     // Poles ...
    Vec4  f1p1;     
    Vec4  f1p2;
    Vec4  f1p3;

    // Filter #2 (High band)

    Vec4  hf;       // Frequency
    Vec4  f2p0;     // Poles ...
    Vec4  f2p1;
    Vec4  f2p2;
    Vec4  f2p3;

    // Sample history buffer

    Vec4  sdm1;     // Sample data minus 1
    Vec4  sdm2;     //                   2
    Vec4  sdm3;     //                   3

    // Gain Controls

    Vec4  lg;       // low  gain
    Vec4  mg;       // mid  gain
    Vec4  hg;       // high gain

};  

static float vsaf = (1.0f / 4294967295.0f);   // Very small amount (Denormal Fix)
static Vec4 vsa = VLoad(vsaf);

Vec4 TestEQ(EQSTATE* es, Vec4& sample)
{
    // Locals

    Vec4  l,m,h;      // Low / Mid / High - Sample Values

    // Filter #1 (lowpass)

    es->f1p0  += (es->lf * (sample   - es->f1p0)) + vsa;
    //es->f1p0 = VAdd(es->f1p0, VAdd(VMul(es->lf, VSub(sample, es->f1p0)), vsa));

    es->f1p1  += (es->lf * (es->f1p0 - es->f1p1));
    //es->f1p1 = VAdd(es->f1p1, VMul(es->lf, VSub(es->f1p0, es->f1p1)));

    es->f1p2  += (es->lf * (es->f1p1 - es->f1p2));
    //es->f1p2 = VAdd(es->f1p2, VMul(es->lf, VSub(es->f1p1, es->f1p2)));

    es->f1p3  += (es->lf * (es->f1p2 - es->f1p3));
    //es->f1p3 = VAdd(es->f1p3, VMul(es->lf, VSub(es->f1p2, es->f1p3)));

    l          = es->f1p3;

    // Filter #2 (highpass)

    es->f2p0  += (es->hf * (sample   - es->f2p0)) + vsa;
    //es->f2p0 = VAdd(es->f2p0, VAdd(VMul(es->hf, VSub(sample, es->f2p0)), vsa));

    es->f2p1  += (es->hf * (es->f2p0 - es->f2p1));
    //es->f2p1 = VAdd(es->f2p1, VMul(es->hf, VSub(es->f2p0, es->f2p1)));

    es->f2p2  += (es->hf * (es->f2p1 - es->f2p2));
    //es->f2p2 = VAdd(es->f2p2, VMul(es->hf, VSub(es->f2p1, es->f2p2)));

    es->f2p3  += (es->hf * (es->f2p2 - es->f2p3));
    //es->f2p3 = VAdd(es->f2p3, VMul(es->hf, VSub(es->f2p2, es->f2p3)));

    h          = es->sdm3 - es->f2p3;
    //h = VSub(es->sdm3, es->f2p3);

    // Calculate midrange (signal - (low + high))

    m          = es->sdm3 - (h + l);
    //m = VSub(es->sdm3, VAdd(h, l));

    // Scale, Combine and store

    l         *= es->lg;
    m         *= es->mg;
    h         *= es->hg;

    //l = VMul(l, es->lg);
    //m = VMul(m, es->mg);
    //h = VMul(h, es->hg);

    // Shuffle history buffer 

    es->sdm3   = es->sdm2;
    es->sdm2   = es->sdm1;
    es->sdm1   = sample;                

    // Return result

    return(l + m + h);
    //return(VAdd(l, VAdd(m, h)));
}

//make these as globals to enforce the function call;
static Vec4 sample[1024], result[1024];
static EQSTATE es;

#include <chrono>
#include <iostream>

int main()
{
    auto t0 = std::chrono::high_resolution_clock::now();

    for (int ii=0; ii<1024; ii++)
    {
        result[ii] = TestEQ(&es, sample[ii]);
    }

    auto t1 = std::chrono::high_resolution_clock::now();
    auto t = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
    std::cout << "timing: " << t << '\n';

    std::cin.get();

    return 0;
}

Link to the working code


The generated assembly of MSVC 2015 for the 1st version:

;   COMDAT ?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z
_TEXT   SEGMENT
?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z PROC      ; TestEQ, COMDAT
; _es$dead$ = ecx
; _sample$ = edx
    vmovaps xmm0, XMMWORD PTR [edx]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3T__m128@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+16, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+32, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+48, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm4, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+80
    vmovaps xmm1, XMMWORD PTR ?es@@3UEQSTATE@@A+192
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+64, xmm4
    vmovaps xmm0, XMMWORD PTR [edx]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3T__m128@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+96, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+112, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+128, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vsubps  xmm2, xmm1, xmm0
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+144, xmm0
    vmovaps xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+176
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+192, xmm0
    vmovaps xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+160
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+176, xmm0
    vmovaps xmm0, XMMWORD PTR [edx]
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+160, xmm0
    vaddps  xmm0, xmm4, xmm2
    vsubps  xmm0, xmm1, xmm0
    vmulps  xmm1, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+224
    vmulps  xmm0, xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+240
    vaddps  xmm1, xmm1, xmm0
    vmulps  xmm0, xmm4, XMMWORD PTR ?es@@3UEQSTATE@@A+208
    vaddps  xmm0, xmm1, xmm0
    ret 0
?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z ENDP      ; TestEQ

The generated assembly of MSVC 2015 for the 2nd version:

?TestEQ@@YA?AUVec4@VMATH@@PAUEQSTATE@@AAU12@@Z PROC ; TestEQ, COMDAT
; ___$ReturnUdt$ = ecx
; _es$dead$ = edx
    push    ebx
    mov ebx, esp
    sub esp, 8
    and esp, -8                 ; fffffff8H
    add esp, 4
    push    ebp
    mov ebp, DWORD PTR [ebx+4]
    mov eax, DWORD PTR _sample$[ebx]
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A
    vmovaps xmm1, XMMWORD PTR ?es@@3UEQSTATE@@A+192
    mov DWORD PTR [esp+4], ebp
    vmovaps xmm0, XMMWORD PTR [eax]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3UVec4@VMATH@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+16, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+32, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+48, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm4, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+80
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+64, xmm4
    vmovaps xmm0, XMMWORD PTR [eax]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3UVec4@VMATH@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+96, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+112, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+128, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vsubps  xmm2, xmm1, xmm0
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+144, xmm0
    vaddps  xmm0, xmm2, xmm4
    vsubps  xmm0, xmm1, xmm0
    vmulps  xmm1, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+224
    vmovdqu xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+176
    vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+192, xmm0
    vmovdqu xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+160
    vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+176, xmm0
    vmovdqu xmm0, XMMWORD PTR [eax]
    vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+160, xmm0
    vmulps  xmm0, xmm4, XMMWORD PTR ?es@@3UEQSTATE@@A+208
    vaddps  xmm1, xmm0, xmm1
    vmulps  xmm0, xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+240
    vaddps  xmm0, xmm1, xmm0
    vmovaps XMMWORD PTR [ecx], xmm0
    mov eax, ecx
    pop ebp
    mov esp, ebx
    pop ebx
    ret 0
?TestEQ@@YA?AUVec4@VMATH@@PAUEQSTATE@@AAU12@@Z ENDP ; TestEQ

The generated assembly of the 2nd version is significantly longer and slower. It is not strictly related to Visual Studio, since Clang 3.8 produces similar performance results.


The generated assembly of Clang 3.8 for the 1st version:

"?TestEQ@@YAT__m128@@PAUEQSTATE@@AAT1@@Z": # @"?TestEQ@@YAT__m128@@PAUEQSTATE@@AAT1@@Z"
Lfunc_begin0:
Ltmp0:
# BB#0:                                 # %entry
    movl    8(%esp), %eax
    movl    4(%esp), %ecx
    vmovaps _vsa, %xmm0
    vmovaps (%ecx), %xmm1
    vmovaps 16(%ecx), %xmm2
    vmovaps (%eax), %xmm3
    vsubps  %xmm2, %xmm3, %xmm3
    vmulps  %xmm3, %xmm1, %xmm3
    vaddps  %xmm3, %xmm0, %xmm3
    vaddps  %xmm3, %xmm2, %xmm2
    vmovaps %xmm2, 16(%ecx)
    vmovaps 32(%ecx), %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  %xmm2, %xmm1, %xmm2
    vaddps  %xmm2, %xmm3, %xmm2
    vmovaps %xmm2, 32(%ecx)
    vmovaps 48(%ecx), %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  %xmm2, %xmm1, %xmm2
    vaddps  %xmm2, %xmm3, %xmm2
    vmovaps %xmm2, 48(%ecx)
    vmovaps 64(%ecx), %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  %xmm2, %xmm1, %xmm1
    vaddps  %xmm1, %xmm3, %xmm1
    vmovaps %xmm1, 64(%ecx)
    vmovaps 80(%ecx), %xmm2
    vmovaps 96(%ecx), %xmm3
    vmovaps (%eax), %xmm4
    vsubps  %xmm3, %xmm4, %xmm4
    vmulps  %xmm4, %xmm2, %xmm4
    vaddps  %xmm4, %xmm0, %xmm0
    vaddps  %xmm0, %xmm3, %xmm0
    vmovaps %xmm0, 96(%ecx)
    vmovaps 112(%ecx), %xmm3
    vsubps  %xmm3, %xmm0, %xmm0
    vmulps  %xmm0, %xmm2, %xmm0
    vaddps  %xmm0, %xmm3, %xmm0
    vmovaps %xmm0, 112(%ecx)
    vmovaps 128(%ecx), %xmm3
    vsubps  %xmm3, %xmm0, %xmm0
    vmulps  %xmm0, %xmm2, %xmm0
    vaddps  %xmm0, %xmm3, %xmm0
    vmovaps %xmm0, 128(%ecx)
    vmovaps 144(%ecx), %xmm3
    vsubps  %xmm3, %xmm0, %xmm0
    vmulps  %xmm0, %xmm2, %xmm0
    vaddps  %xmm0, %xmm3, %xmm0
    vmovaps %xmm0, 144(%ecx)
    vmovaps 192(%ecx), %xmm2
    vsubps  %xmm0, %xmm2, %xmm0
    vaddps  %xmm0, %xmm1, %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  208(%ecx), %xmm1, %xmm1
    vmulps  224(%ecx), %xmm2, %xmm2
    vmulps  240(%ecx), %xmm0, %xmm0
    vmovaps 176(%ecx), %xmm3
    vmovaps %xmm3, 192(%ecx)
    vmovaps 160(%ecx), %xmm3
    vmovaps %xmm3, 176(%ecx)
    vmovaps (%eax), %xmm3
    vmovaps %xmm3, 160(%ecx)
    vaddps  %xmm2, %xmm0, %xmm0
    vaddps  %xmm0, %xmm1, %xmm0
    retl
Lfunc_end0:

The generated assembly of Clang 3.8 for the 2nd version:

"?TestEQ@@YA?AUVec4@@PAUEQSTATE@@AAU1@@Z": # @"?TestEQ@@YA?AUVec4@@PAUEQSTATE@@AAU1@@Z"
Lfunc_begin0:
Ltmp0:
# BB#0:                                 # %entry
    movl    12(%esp), %ecx
    movl    8(%esp), %edx
    vmovaps (%edx), %xmm0
    vmovaps 16(%edx), %xmm1
    vmovaps (%ecx), %xmm2
    vsubps  %xmm1, %xmm2, %xmm2
    vmulps  %xmm0, %xmm2, %xmm2
    vaddps  _vsa, %xmm2, %xmm2
    vaddps  %xmm2, %xmm1, %xmm1
    vmovaps %xmm1, 16(%edx)
    vmovaps 32(%edx), %xmm2
    vsubps  %xmm2, %xmm1, %xmm1
    vmulps  %xmm0, %xmm1, %xmm1
    vaddps  %xmm1, %xmm2, %xmm1
    vmovaps %xmm1, 32(%edx)
    vmovaps 48(%edx), %xmm2
    vsubps  %xmm2, %xmm1, %xmm1
    vmulps  %xmm0, %xmm1, %xmm1
    vaddps  %xmm1, %xmm2, %xmm1
    vmovaps %xmm1, 48(%edx)
    vmovaps 64(%edx), %xmm2
    vsubps  %xmm2, %xmm1, %xmm1
    vmulps  %xmm0, %xmm1, %xmm0
    vaddps  %xmm0, %xmm2, %xmm0
    vmovaps %xmm0, 64(%edx)
    vmovaps 80(%edx), %xmm1
    vmovaps 96(%edx), %xmm2
    vmovaps (%ecx), %xmm3
    vsubps  %xmm2, %xmm3, %xmm3
    vmulps  %xmm1, %xmm3, %xmm3
    vaddps  _vsa, %xmm3, %xmm3
    vaddps  %xmm3, %xmm2, %xmm2
    vmovaps %xmm2, 96(%edx)
    vmovaps 112(%edx), %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  %xmm1, %xmm2, %xmm2
    vaddps  %xmm2, %xmm3, %xmm2
    vmovaps %xmm2, 112(%edx)
    vmovaps 128(%edx), %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  %xmm1, %xmm2, %xmm2
    vaddps  %xmm2, %xmm3, %xmm2
    vmovaps %xmm2, 128(%edx)
    vmovaps 144(%edx), %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  %xmm1, %xmm2, %xmm1
    vaddps  %xmm1, %xmm3, %xmm1
    vmovaps %xmm1, 144(%edx)
    vmovaps 192(%edx), %xmm2
    vsubps  %xmm1, %xmm2, %xmm1
    vaddps  %xmm1, %xmm0, %xmm3
    vsubps  %xmm3, %xmm2, %xmm2
    vmulps  208(%edx), %xmm0, %xmm0
    vmulps  224(%edx), %xmm2, %xmm2
    movl    4(%esp), %eax
    vmulps  240(%edx), %xmm1, %xmm1
    vmovaps 176(%edx), %xmm3
    vmovaps %xmm3, 192(%edx)
    vmovaps 160(%edx), %xmm3
    vmovaps %xmm3, 176(%edx)
    vmovaps (%ecx), %xmm3
    vmovaps %xmm3, 160(%edx)
    vaddps  %xmm2, %xmm0, %xmm0
    vaddps  %xmm0, %xmm1, %xmm0
    vmovaps %xmm0, (%eax)
    retl
Lfunc_end0:

Although the number of instructions is the same, the 1st version is still about 50% faster.


I tried to identify the cause of the problem, without success. There are suspicious things in the 2nd MSVC assembly, like those ugly vmovdqu instructions. The construction, the copy assignment operator and the pass-by-reference can also unnecessarily move data from the SSE registers back to memory, but all my attempts to solve or exactly identify the issue were unsuccessful.

I really don't believe that such a simple wrapper cannot reach the same performance as the bare __m128, and whatever causes the overhead, it should be possible to eliminate it.

So what is going on there?

As it turned out, the problem is not with the user-defined struct Vec4. It is deeply related to the x86 calling conventions.

The default x86 calling convention in Visual C++ is __cdecl, which

Pushes parameters on the stack, in reverse order (right to left)

Now this is a problem, since Vec4 should be kept and passed in XMM registers. But let's see what is actually happening.


The 1st case

In the 1st case Vec4 is a simple type alias of __m128.

using Vec4 = __m128;
/* ... */
Vec4 TestEQ(EQSTATE* es, Vec4 &sample) { ... }

The generated function header of TestEQ in assembly is

?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z PROC      ; TestEQ, COMDAT
; _es$ = ecx
; _sample$ = edx
...

Nice.


The 2nd case

In the 2nd case Vec4 is not an alias of __m128, it is now a user-defined type.

Here I investigate the compilation for both x86 and x64 platforms.

x86 (32-bit compilation)

Since __cdecl (which is the default calling convention in x86) doesn't allow passing aligned values to functions (it would emit Error C2719: 'sample': formal parameter with requested alignment of 16 won't be aligned), we pass it by const reference.

struct Vec4{ __m128 simd; /* ... */ };
/* ... */
Vec4 TestEQ(EQSTATE* es, const Vec4 &sample) { ... }

This generates the function header of TestEQ as

?TestEQ@@YA?AUVec4@@PAUEQSTATE@@ABU1@@Z PROC        ; TestEQ, COMDAT
; ___$ReturnUdt$ = ecx
; _es$ = edx
    push    ebx
    mov ebx, esp
    sub esp, 8
    and esp, -8                 ; fffffff8H
    add esp, 4
    push    ebp
    mov ebp, DWORD PTR [ebx+4]
    mov eax, DWORD PTR _sample$[ebx]
    ...

This is not as simple as in the 1st case. The arguments are moved to the stack. There are also some extra mov instructions between the first SSE instructions which are not listed here. Overall these instructions are enough to hit the performance to some degree.

x64 (64-bit compilation)

Windows in x64 uses a different calling convention as part of the x64 Application Binary Interface (ABI).

This convention tries to keep the data in registers if possible, in a way that floating-point data is kept in XMM registers.

From MSDN Overview of x64 Calling Conventions:

The x64 Application Binary Interface (ABI) is a 4 register fast-call calling convention, with stack-backing for those registers. There is a strict one-to-one correspondence between arguments in a function, and the registers for those arguments. Any argument that doesn't fit in 8 bytes, or is not 1, 2, 4, or 8 bytes, must be passed by reference. (...) All floating point operations are done using the 16 XMM registers. The arguments are passed in registers RCX, RDX, R8, and R9. If the arguments are float/double, they are passed in XMM0L, XMM1L, XMM2L, and XMM3L. 16 byte arguments are passed by reference.

From the Wikipedia page for x86-64 calling conventions:

The Microsoft x64 calling convention is followed on Windows and pre-boot UEFI (for long mode on x86-64). It uses registers RCX, RDX, R8, R9 for the first four integer or pointer arguments (in that order), and XMM0, XMM1, XMM2, XMM3 are used for floating point arguments. Additional arguments are pushed onto the stack (right to left). Integer return values (similar to x86) are returned in RAX if 64 bits or less. Floating point return values are returned in XMM0.

So the 2nd case in x64 mode generates the function header of TestEQ as

?TestEQ@@YQ?AUVec4@@PAUEQSTATE@@ABU1@@Z PROC        ; TestEQ, COMDAT
; _es$ = ecx
; _sample$ = edx
...

This is exactly the same as in the 1st case!


Solution

For x86 mode the presented behaviour should definitely be fixed.

The simplest solution is to inline the function. Although inline is just a hint which the compiler can completely ignore, you can tell the compiler to always inline the function, as shown in the sketch below. However, sometimes this is not desired because of the function size or any other reason.
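
A minimal sketch, assuming MSVC (GCC and Clang would use __attribute__((always_inline)) instead), of forcing a small helper to always be inlined so that its Vec4 arguments never have to go through the calling convention at all:

// __forceinline is MSVC-specific: the helper is inlined even when the
// regular inline hint would be ignored, so no call is emitted and the
// Vec4 operands stay in XMM registers.
__forceinline Vec4 VAdd(const Vec4 &va, const Vec4 &vb)
{
    return _mm_add_ps(va.simd, vb.simd);
}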

Fortunately, Microsoft introduced the __vectorcall convention in Visual Studio 2013 and above (available in both x86 and x64 mode). This is very similar to the default Windows x64 calling convention, but with more utilizable registers.

Let's rewrite the 2nd case with __vectorcall:

Vec4 __vectorcall TestEQ(EQSTATE* es, const Vec4 &sample) { ... }

Now the generated assembly function header of TestEQ is

?TestEQ@@YQ?AUVec4@@PAUEQSTATE@@ABU1@@Z PROC        ; TestEQ, COMDAT
; _es$ = ecx
; _sample$ = edx
...

This is finally the same as in the 1st case, and the same as the 2nd case in x64.

As Peter Cordes pointed out, to take full advantage of __vectorcall, the Vec4 argument should be passed by value instead of by constant reference. To do this the passed type must meet some requirements: it must be trivially copy constructible (no user-defined copy constructors) and it shouldn't contain any union. More info in the comments below and here.
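
A minimal sketch of such a by-value signature, reusing the EQSTATE and Vec4 types from above (this declaration is an assumption, not taken from the original project):

// Passing Vec4 by value lets __vectorcall hand it over in an XMM register,
// provided Vec4 stays trivially copy constructible and contains no union.
Vec4 __vectorcall TestEQ(EQSTATE* es, Vec4 sample);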

Final words

It looks like MSVC automatically applies the __vectorcall convention as an optimization when it detects an __m128 argument. Otherwise it uses the default calling convention __cdecl (you can change this behaviour with compiler options).

People told me in the comments that they don't see much difference between the assembly generated for the two cases by GCC and Clang. This is because these compilers, with optimization flag -O2, simply inline the TestEQ function into the test loop body (see). It is also possible that they are smarter than MSVC and perform better optimization of the function call.