SIMD vectorization strategies for group-by operations on multiple, very large data arrays
I have to do a large number of aggregation operations, with the output grouped by some dimension (an int/byte ID). I'm using C#, but hopefully I can still get good advice from the mostly C++ crowd reading this :)
A simplified version is below:
public static (double[], double[]) AggregateDataGroupBy(double[] data, double[] weight, byte[] dimension)
{
    int numberOfValues = byte.MaxValue - byte.MinValue + 1;
    double[] totalValue = new double[numberOfValues];
    double[] totalWeight = new double[numberOfValues];
    for (int i = 0; i < data.Length; i++)
    {
        byte index = dimension[i];
        totalValue[index] += data[i];
        totalWeight[index] += weight[i];
    }
    return (totalValue, totalWeight);
}
SIMD vectorization gives a significant speedup when no grouping by dimension is needed. My first attempt at vectorizing the operation was to take the running totals for the 4 dimension rows being processed, load the input vectors using a gather, perform the aggregation, and then scatter back. Since scatter is not part of AVX2, that last part is particularly slow.
public static unsafe (double[], double[]) AggregateDataGather(double[] data, double[] weight, int[] dimension)
{
    int numberOfValues = 256;
    double[] totalValue = new double[numberOfValues];
    double[] totalWeight = new double[numberOfValues];
    if (Avx2.IsSupported)
    {
        int vectorSize = 256 / 8 / sizeof(double);
        int i;
        fixed (double* ptr = data, ptr2 = weight, ptrValue = totalValue, ptrWeight = totalWeight)
        {
            fixed (int* dimptr = dimension)
            {
                var accValue = stackalloc double[vectorSize];
                var accWeight = stackalloc double[vectorSize];
                for (i = 0; i <= data.Length - vectorSize; i += vectorSize)
                {
                    // Gather the current running totals for the 4 buckets being processed
                    Vector128<int> indices = Avx2.LoadVector128(dimptr + i);
                    var accVectorV = Avx2.GatherVector256(ptrValue, indices, 8);
                    var accVectorW = Avx2.GatherVector256(ptrWeight, indices, 8);
                    // Load 4 values and 4 weights, add them to the gathered totals
                    var v = Avx2.LoadVector256(ptr + i);
                    var w = Avx2.LoadVector256(ptr2 + i);
                    accVectorV = Avx2.Add(accVectorV, v);
                    accVectorW = Avx2.Add(accVectorW, w);
                    // Spill to a stack buffer, then scatter back one lane at a time.
                    // NB: if the same index occurs more than once within one vector, only the last lane's write survives.
                    Avx2.Store(accValue, accVectorV);
                    Avx2.Store(accWeight, accVectorW);
                    for (int ii = 0; ii < vectorSize; ii++)
                    {
                        var index = dimptr[i + ii];
                        totalValue[index] = accValue[ii];
                        totalWeight[index] = accWeight[ii];
                    }
                }
            }
        }
    }
    else if (Avx.IsSupported || Sse42.IsSupported)
    {
        // Do other stuff
    }
    return (totalValue, totalWeight);
}
(Please excuse the change of dimension from byte to int; I tested both, and both are slower.)
The intrinsics version above runs slower than the original algorithm on my Ryzen 3600 (268 ms vs. 230 ms for 100M values).
Given that my data only changes after many aggregations (over hundreds/thousands of different dimensions), I found that my fastest implementation is to store the data (value, weight) in a single vector and do a simple grouped pass. This gives similar performance on the Ryzen, but is 10% faster on an older i7 (without AVX); a sketch of the one-time packing step follows the code below.
public static Vector2[] AggregateData(Vector2[] data, byte[] dimension)
{
    int numberOfValues = byte.MaxValue - byte.MinValue + 1;
    Vector2[] sum = new Vector2[numberOfValues];
    for (int i = 0; i < data.Length; i++)
    {
        sum[dimension[i]] += data[i];
    }
    return sum;
}
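Since the inputs change only rarely, the (value, weight) pairs can be packed into this Vector2[] layout once and reused across many aggregations. A minimal sketch of that one-time packing step (my own illustration; note the double-to-float precision loss it implies):

public static Vector2[] PackData(double[] data, double[] weight)
{
    Debug.Assert(data.Length == weight.Length);
    Vector2[] packed = new Vector2[data.Length];
    for (int i = 0; i < data.Length; i++)
    {
        // X holds the value, Y holds the weight, mirroring AggregateData above
        packed[i] = new Vector2((float)data[i], (float)weight[i]);
    }
    return packed;
}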
I have read some papers on histogram functions that simply count the occurrences of each dimension value. They achieve a near-perfect 8x speedup compared to the naive approach.
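For what it's worth, one common trick from that literature can be shown without intrinsics at all: keep several independent copies of the counts so that consecutive elements hitting the same bucket do not serialize on one memory location, then sum the copies at the end. A plain scalar sketch of that idea (my own illustration, not taken from any specific paper):

public static int[] CountOccurrences(byte[] dimension)
{
    const int numberOfValues = 256;
    // 4 independent copies of the counts break the load-add-store dependency
    // chain on a single hot bucket.
    int[,] counts = new int[4, numberOfValues];
    int i = 0;
    for (; i <= dimension.Length - 4; i += 4)
    {
        counts[0, dimension[i + 0]]++;
        counts[1, dimension[i + 1]]++;
        counts[2, dimension[i + 2]]++;
        counts[3, dimension[i + 3]]++;
    }
    for (; i < dimension.Length; i++)
        counts[0, dimension[i]]++;
    // Merge the partial histograms
    int[] total = new int[numberOfValues];
    for (int k = 0; k < numberOfValues; k++)
        total[k] = counts[0, k] + counts[1, k] + counts[2, k] + counts[3, k];
    return total;
}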
Am I missing something in my attempts to use the AVX2 intrinsics? Am I always going to face inefficient gather/scatter operations? Any comments/suggestions?
As a sub-case, are there strategies that only work when the dimension size is small (processing 4 dimension values at a time)? E.g. loading the values into vectors with a single non-zero lane, as below, and optimizing the number of rows processed at a time to use all of the cache memory (a toy sketch of this idea follows the example).
Values (11, 12, 13, 14, 15, 16, 17)
Indices (1, 0, 3, 1, 2, 0, 3)
=>
<0, 11, 0, 0>
+ <12, 0, 0, 0>
+ <0, 0, 0, 13>
+ <0, 14, 0, 0>
+ <0, 0, 15, 0>
+ <16, 0, 0, 0>
+ <0, 0, 0, 17>
(This doesn't look like a viable solution to me, since it becomes inefficient as soon as the dimension size increases. So I haven't tried it, but I will if someone suggests it as an effective workaround.)
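To make the idea above concrete, here is a toy sketch (untested) for a dimension size of at most 4, using System.Numerics.Vector4 and a lookup table of lane masks. Each value is expanded into a vector that is zero except in the lane selected by its index, then added to a single accumulator; as noted, this does not scale once the dimension size grows, because the mask table needs one entry per bucket.

static readonly Vector4[] laneMasks =
{
    new Vector4(1f, 0f, 0f, 0f),
    new Vector4(0f, 1f, 0f, 0f),
    new Vector4(0f, 0f, 1f, 0f),
    new Vector4(0f, 0f, 0f, 1f),
};

public static Vector4 AggregateSmallDimension(float[] values, byte[] dimension)
{
    Vector4 sum = Vector4.Zero;
    for (int i = 0; i < values.Length; i++)
    {
        // Place values[i] into the lane chosen by dimension[i], leave the rest zero
        sum += laneMasks[dimension[i]] * values[i];
    }
    return sum;
}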
As said in the comments, vectorization is hard for aggregation use cases like this. However, that doesn't mean SIMD is completely useless for your problem. Try this version (untested).
The main idea: this version saves 50% of the random loads/stores that update the accumulators. It interleaves the accumulators in memory, uses 128-bit load/add/store instructions, and splits the result back into 2 C# arrays after all the input values have been consumed.
static unsafe void aggregateSse2( double* accumulators, double* a, double* b, byte* dimension, int count )
{
    Debug.Assert( count >= 0 );
    double* aEnd = a + ( count & ( ~1 ) );
    while( a < aEnd )
    {
        // Load accumulator corresponding to the first bucket
        double* accPointer = accumulators + ( 2u * dimension[ 0 ] );
        Vector128<double> acc = Sse2.LoadAlignedVector128( accPointer );

        // Load 2 values from each input array.
        // BTW, possible to use AVX and unroll by 4 instead of 2, using GetLow/GetHigh to extract the 16-byte pieces.
        // Gonna save a bit of loads at the cost of more shuffles, might be slightly faster overall.
        Vector128<double> va = Sse2.LoadVector128( a );
        Vector128<double> vb = Sse2.LoadVector128( b );

        // Increment accumulator with the first value of each array, store back to RAM
        acc = Sse2.Add( acc, Sse2.UnpackLow( va, vb ) );
        Sse2.StoreAligned( accPointer, acc );

        // Load accumulator corresponding to the second bucket.
        // Potentially it might be the same pointer, can't load both in advance.
        accPointer = accumulators + ( 2u * dimension[ 1 ] );
        acc = Sse2.LoadAlignedVector128( accPointer );
        a += 2;
        b += 2;
        dimension += 2;

        // Increment accumulator with the second value of each array, store back to RAM
        acc = Sse2.Add( acc, Sse2.UnpackHigh( va, vb ) );
        Sse2.StoreAligned( accPointer, acc );
    }

    if( 0 != ( count & 1 ) )
    {
        // The input size was an odd number, one item left at these pointers.
        // Load a scalar from the first input array into the lower lane of a vector
        Vector128<double> vec = Sse2.LoadScalarVector128( a );
        // Load the accumulator corresponding to the bucket
        double* accPointer = accumulators + ( 2u * dimension[ 0 ] );
        Vector128<double> acc = Sse2.LoadAlignedVector128( accPointer );
        // Load a scalar from the second input array into the higher lane of that vector
        vec = Sse2.LoadHigh( vec, b );
        // Increment accumulator and store back to RAM
        acc = Sse2.Add( acc, vec );
        Sse2.StoreAligned( accPointer, acc );
    }
}
static unsafe void splitAccumulators( double* values, double* weights, double* accumulators, int numberOfValues )
{
    double* end = accumulators + numberOfValues * 2;
    while( accumulators < end )
    {
        Vector128<double> vec = Sse2.LoadAlignedVector128( accumulators );
        accumulators += 2;
        Sse2.StoreScalar( values, vec );
        values++;
        Sse2.StoreHigh( weights, vec );
        weights++;
    }
}
/// <summary>Align pointer by 16 bytes, rounding up.</summary>
[MethodImpl( MethodImplOptions.AggressiveInlining )]
static unsafe void* roundUpBy16( void* pointer )
{
    if( Environment.Is64BitProcess ) // This branch is missing from JIT output BTW, it's free.
    {
        long a = (long)pointer;
        a = ( a + 15L ) & ( -16L );
        return (void*)a;
    }
    else
    {
        int a = (int)pointer;
        a = ( a + 15 ) & ( -16 );
        return (void*)a;
    }
}
[SkipLocalsInit] // Otherwise the runtime gonna zero-initialize the stack allocated buffer, very slowly with `push 0` instructions in a loop.
public static (double[], double[]) AggregateDataSse2( double[] data, double[] weight, byte[] dimension )
{
    Debug.Assert( data.Length == weight.Length && data.Length == dimension.Length );
    const int numberOfValues = 0x100;
    unsafe
    {
        // The buffer is about 4kb RAM, fits in L1D cache.
        // Allocating 2 extra doubles (16 extra bytes) to align the pointer.
        double* accumulators = stackalloc double[ ( numberOfValues * 2 ) + 2 ];
        // Align by 16 bytes
        accumulators = (double*)roundUpBy16( accumulators );
        // Clear accumulators with zeros, let's hope the implementation of that standard library method is good.
        new Span<double>( accumulators, numberOfValues * 2 ).Fill( 0 );

        // Process the input data
        fixed( double* a = data )
        fixed( double* b = weight )
        fixed( byte* dim = dimension )
            aggregateSse2( accumulators, a, b, dim, data.Length );

        // Split the result into 2 arrays
        double[] totalValue = new double[ numberOfValues ];
        double[] totalWeight = new double[ numberOfValues ];
        fixed( double* values = totalValue )
        fixed( double* weights = totalWeight )
            splitAccumulators( values, weights, accumulators, numberOfValues );
        return (totalValue, totalWeight);
    }
}
It only uses SSE2 because it doesn't particularly need anything wider, but compared to your scalar version it should still save a substantial number of instructions and RAM transactions. I would expect a measurable improvement on all computers.
[Sorry if this should have been a comment rather than an answer, but I wasn't sure how to format code in a comment.]
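The comments in aggregateSse2 mention an AVX variant that unrolls by 4 and splits the 32-byte loads into 16-byte halves. Purely as an illustration of that remark (untested, my own reading of it, and not necessarily what the SSE2a row in the benchmarks further down refers to), such a variant might look like the following; it assumes Avx.IsSupported and a count that is a multiple of 4, so a real version would need the same tail handling as aggregateSse2.

static unsafe void aggregateAvx( double* accumulators, double* a, double* b, byte* dimension, int count )
{
    Debug.Assert( count >= 0 && ( count % 4 ) == 0 );
    double* aEnd = a + count;
    while( a < aEnd )
    {
        // One 32-byte load per input array instead of two 16-byte loads
        Vector256<double> va = Avx.LoadVector256( a );
        Vector256<double> vb = Avx.LoadVector256( b );
        a += 4;
        b += 4;
        Vector128<double> vaLow = va.GetLower(), vaHigh = va.GetUpper();
        Vector128<double> vbLow = vb.GetLower(), vbHigh = vb.GetUpper();

        // The accumulators must still be updated one bucket at a time,
        // in case consecutive indices point at the same bucket.
        double* accPointer = accumulators + ( 2u * dimension[ 0 ] );
        Sse2.StoreAligned( accPointer, Sse2.Add( Sse2.LoadAlignedVector128( accPointer ), Sse2.UnpackLow( vaLow, vbLow ) ) );
        accPointer = accumulators + ( 2u * dimension[ 1 ] );
        Sse2.StoreAligned( accPointer, Sse2.Add( Sse2.LoadAlignedVector128( accPointer ), Sse2.UnpackHigh( vaLow, vbLow ) ) );
        accPointer = accumulators + ( 2u * dimension[ 2 ] );
        Sse2.StoreAligned( accPointer, Sse2.Add( Sse2.LoadAlignedVector128( accPointer ), Sse2.UnpackLow( vaHigh, vbHigh ) ) );
        accPointer = accumulators + ( 2u * dimension[ 3 ] );
        Sse2.StoreAligned( accPointer, Sse2.Add( Sse2.LoadAlignedVector128( accPointer ), Sse2.UnpackHigh( vaHigh, vbHigh ) ) );
        dimension += 4;
    }
}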
A version of Soonts' answer using interleaved input and output is below. However, it ends up looking equivalent to the Vector2 struct approach, except using doubles instead of floats. Interestingly, this version is slower than the original answer.
Below are the results for the original answer (SSE2), an optimized answer (SSE2a), my "interleaved" version (SSE2i), and the Vector2 (float-based) version for comparison.
| Type | Method | Seed | Size | Mean | Error | StdDev |
|------------------------------- |------------------- |----- |---------- |---------:|--------:|--------:|
| AggregatorBenchmarkDoubleSSE2 | AggregateBenchmark | 42 | 100000000 | 288.3 ms | 2.25 ms | 2.10 ms |
| AggregatorBenchmarkDoubleSSE2a | AggregateBenchmark | 42 | 100000000 | 275.6 ms | 5.31 ms | 5.45 ms |
| AggregatorBenchmarkDoubleSSE2i | AggregateBenchmark | 42 | 100000000 | 280.3 ms | 2.69 ms | 2.52 ms |
| AggregatorBenchmarkVector2 | AggregateBenchmark | 42 | 100000000 | 259.9 ms | 1.48 ms | 1.31 ms |
static unsafe void aggregateSse2i(double* accumulators, double* a, byte* dimension, int count)
{
    Debug.Assert(count >= 0);
    double* aEnd = a + 2 * count;
    while (a < aEnd)
    {
        // Load the accumulator corresponding to this bucket
        double* accPointer = accumulators + (2u * *dimension);
        Vector128<double> acc = Sse2.LoadAlignedVector128(accPointer);
        dimension++;

        // Load an interleaved (value, weight) pair from the input array.
        // BTW, possible to use AVX and unroll by 4 instead of 2, using GetLow/GetHigh to extract the 16-byte pieces.
        // Gonna save a bit of loads at the cost of more shuffles, might be slightly faster overall.
        Vector128<double> va = Sse2.LoadVector128(a);
        a += 2;

        // Increment the accumulator with the pair, store back to RAM
        acc = Sse2.Add(acc, va);
        Sse2.StoreAligned(accPointer, acc);
    }
}
// Note: AggregateDataSse2i below returns the interleaved buffer directly, so this helper ends up unused there.
static unsafe void splitAccumulators2(double* values, double* accumulators, int numberOfValues)
{
    double* end = accumulators + numberOfValues * 2;
    while (accumulators < end)
    {
        Vector128<double> vec = Sse2.LoadAlignedVector128(accumulators);
        accumulators += 2;
        Sse2.Store(values, vec);
        values += 2;
    }
}
[SkipLocalsInit] // Otherwise the runtime gonna zero-initialize the stack allocated buffer, very slowly with `push 0` instructions in a loop.
public static double[] AggregateDataSse2i(double[] data, byte[] dimension)
{
    Debug.Assert(data.Length == 2 * dimension.Length);
    const int numberOfValues = 0x100;
    unsafe
    {
        // The buffer is about 4kb RAM, fits in L1D cache.
        // Allocating 2 extra doubles (16 extra bytes) to align the pointer.
        double* accumulators = stackalloc double[(numberOfValues * 2) + 2];
        // Align by 16 bytes
        accumulators = (double*)roundUpBy16(accumulators);
        // Clear accumulators with zeros, let's hope the implementation of that standard library method is good.
        new Span<double>(accumulators, numberOfValues * 2).Fill(0);
        // Process the input data
        fixed (double* a = data)
        fixed (byte* dim = dimension)
            aggregateSse2i(accumulators, a, dim, dimension.Length);
        return new Span<double>(accumulators, numberOfValues * 2).ToArray();
    }
}
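For completeness, a small hypothetical helper (my own addition, not part of the benchmarked code) showing how the two original input arrays would be packed into the value/weight-interleaved layout that AggregateDataSse2i expects; like the Vector2 packing, this is done once and reused across many aggregations:

public static double[] InterleaveInputs(double[] data, double[] weight)
{
    Debug.Assert(data.Length == weight.Length);
    double[] interleaved = new double[data.Length * 2];
    for (int i = 0; i < data.Length; i++)
    {
        // Even slots hold the values, odd slots hold the weights
        interleaved[2 * i] = data[i];
        interleaved[2 * i + 1] = weight[i];
    }
    return interleaved;
}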