Stuck while optimizing a segmented prime sieve in C
I'm trying to implement an efficient segmented prime sieve in C. It's basically the sieve of Eratosthenes, but each segment is cut down to a cache-friendly size.
In my version there is an array of flag bits, where each bit represents a consecutive odd number. A bit is erased by AND-masking whenever it is a multiple of a known prime.
This part of the code consumes about 90% of the running time. Every piece of dirty code has a reason, which I explain in the comments, but the overall operation is quite simple:
- Grab a prime.
- Compute its square, and the multiple of it that is slightly larger than the number represented by the start of the cache block.
- Take whichever is larger.
- Erase that bit, advance by twice the base prime (only odd numbers are stored), and repeat until the end of the cache block.

That's it. (A simplified sketch of this per-prime loop follows below.)
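In code terms, the steps above boil down to something like this for a single prime. This is only a simplified sketch, not the actual routine; it takes the end of the segment as a parameter and skips the unsigned-overflow handling the real code below needs near the top of the 32-bit range:

//Simplified sketch of the per-prime loop described above. The layout matches
//the real code: one bit per odd number, bit index (n - st) / 2, segment [st, end).
static void sieve_one_prime_sketch(unsigned *f, unsigned st, unsigned end, unsigned p) {
    unsigned n = st + p - st % p;        //smallest multiple of p above st
    if (!(n & 1)) n += p;                //even multiples are not stored; make it odd
    if (p * p > n) n = p * p;            //composites below p^2 were erased by smaller primes
    for (; n < end; n += 2 * p) {        //advance by 2*p to stay on odd multiples
        unsigned i = (n - st) / 2;       //bit index inside the segment
        f[i / 32] &= ~(1u << (i % 32));  //erase the bit: this odd number is composite
    }
}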
There is a program called primesieve that does this very fast. It is about 3 times faster than my version. I read its documentation about the algorithm, and its code, and applied whatever seemed reasonable to my own code.
Since there is a known program that is much faster than mine, I will keep investigating what they do that I don't, but before that I'm posting this question for extra help, in case you can spot which part is not running efficiently.
Again, this single routine consumes 90% of the running time, so I'm really focused on making this part run faster.
This is the old version; I made some modifications after posting, and that one is below this one. The comments still apply.
#include <stdlib.h>
#include <stdio.h>
//size of the cache block (64K)
#define C 0x10000
static unsigned sq(unsigned a) {
return a * a;
}
//`f` is the array of flags. `st` is the starting point, a multiple of
//`C * 16`. Each byte covers 16 consecutive numbers (8 odd-number flags), hence `C` multiplied by 16.
//`p` is the array of prime numbers from 67 up to `sqrt(st + C * 16)`. Primes
//below 67 are handled specially by other routines.
__attribute__((noinline)) //Don't inline for testing.
static void sieve_bit(unsigned *f, unsigned st, unsigned *p) {
//Doing a table access rather than computing the mask at runtime was about 15% faster
//on my machine. I'm guessing that probably the bit-shifts on x86 only being
//able to use the CL register as a variable counter creates a dependency
//when there is a series of shifts, making it slower than multiple L1 cache
//accesses.
static const unsigned m[] = {
~(1u << 0), ~(1u << 1), ~(1u << 2), ~(1u << 3), ~(1u << 4),
~(1u << 5), ~(1u << 6), ~(1u << 7), ~(1u << 8), ~(1u << 9),
~(1u << 10), ~(1u << 11), ~(1u << 12), ~(1u << 13), ~(1u << 14),
~(1u << 15), ~(1u << 16), ~(1u << 17), ~(1u << 18), ~(1u << 19),
~(1u << 20), ~(1u << 21), ~(1u << 22), ~(1u << 23), ~(1u << 24),
~(1u << 25), ~(1u << 26), ~(1u << 27), ~(1u << 28), ~(1u << 29),
~(1u << 30), ~(1u << 31)
};
unsigned p2 = sq(*p);
do {
unsigned r = st % *p;
//This calculates the starting point. The result of `n` will be an odd
//number multiple of `*p` a bit larger than `st`.
unsigned n = st + *p - r + (-(r & 1) & *p);
//If the square of `*p` is larger than `n`, use it instead.
if (p2 > n) {
n = p2;
}
//The loop is unrolled 8 times, which gave the fastest result on my
//machine.
for (;; n += *p * 16) {
//Jumps to the next stage when there is no space left for 8
//operations to be done. This could simply be a `break` followed by
//a loop, but this was slightly (1~3%) faster than the simpler
//alternative.
int d = st + C * 16 - n;
//as mentioned in the comments, the value of `d` relies on
//implementation dependent behaviour for 2's complement machines.
//`n` can be larger than `st + C * 16`, in which case the wrapped
//unsigned value is converted to a negative `int`.
if (d <= (int)*p * 14) {
if (d <= (int)*p * 6) {
if (d <= (int)*p * 2) {
if (d <= (int)*p * 0) goto L0; else goto L1;
} else {
if (d <= (int)*p * 4) goto L2; else goto L3;
}
} else {
if (d <= (int)*p * 10) {
if (d <= (int)*p * 8) goto L4; else goto L5;
} else {
if (d <= (int)*p * 12) goto L6; else goto L7;
}
}
}
//The multiples of primes are erased.
#define ur(i) do {\
unsigned _n = (n - st) / 2 + *p * i;\
f[_n / 32] &= m[_n % 32];\
} while (0)
ur(0); ur(1); ur(2); ur(3); ur(4); ur(5); ur(6); ur(7);
}
//Erase with the leftovers.
L7: ur(6);
L6: ur(5);
L5: ur(4);
L4: ur(3);
L3: ur(2);
L2: ur(1);
L1: ur(0);
L0:
p2 = sq(*++p);
#undef ur
} while (p2 < st + C * 16);
}
//This could break if `TscInvariant` CPUID is false, which is probably rare on
//modern machines?
static inline unsigned long long rdtscp() {
unsigned _;
return __builtin_ia32_rdtscp(&_);
}
//This isn't used in the actual code; it's just a simple one for a test. Fills
//the array with enough prime numbers to sieve all 32-bit values.
static inline void fillPrimes(unsigned *p) {
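//The `c ^= 6` trick below alternates the increment between 2 and 4, so both
//loops only visit numbers that are coprime to 2 and 3 (a small wheel).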
for (unsigned n = 67, c = 2; n <= 65521; n += c ^= 6) {
for (unsigned d = 5, c = 4; d * d <= n; d += c ^= 6) {
if (!(n % d)) goto next;
}
*p++ = n;
next:;
}
}
int main() {
unsigned p[8000];
fillPrimes(p);
unsigned f[C / sizeof(unsigned)];
puts("start sieve");
unsigned long long c = rdtscp();
for (int i = 0; i < 2000; ++i) {
volatile unsigned *vf = f;
sieve_bit((unsigned *)vf, C * 16 * i, p);
}
c = rdtscp() - c;
printf("%llu\n", c);
return 0;
}
After taking a break, I found some redundant computation inside the loop; removing it made it about 6~7% faster.
static void sieve_bit(unsigned *f, unsigned st, unsigned *p) {
static const unsigned m[] = {
~(1u << 0), ~(1u << 1), ~(1u << 2), ~(1u << 3), ~(1u << 4),
~(1u << 5), ~(1u << 6), ~(1u << 7), ~(1u << 8), ~(1u << 9),
~(1u << 10), ~(1u << 11), ~(1u << 12), ~(1u << 13), ~(1u << 14),
~(1u << 15), ~(1u << 16), ~(1u << 17), ~(1u << 18), ~(1u << 19),
~(1u << 20), ~(1u << 21), ~(1u << 22), ~(1u << 23), ~(1u << 24),
~(1u << 25), ~(1u << 26), ~(1u << 27), ~(1u << 28), ~(1u << 29),
~(1u << 30), ~(1u << 31)
};
unsigned p2 = sq(*p);
do {
unsigned n =
(p2 > st + *p * 2 ? p2 - st : *p - st % *p + (-(st % *p & 1) & *p)) / 2;
for (;; n += *p * 8) {
int d = C * 8 - (int)n;
if (d <= (int)*p * 7) {
if (d <= (int)*p * 3) {
if (d <= (int)*p * 1) {
if (d <= (int)*p * 0) goto L0; else goto L1;
} else {
if (d <= (int)*p * 2) goto L2; else goto L3;
}
} else {
if (d <= (int)*p * 5) {
if (d <= (int)*p * 4) goto L4; else goto L5;
} else {
if (d <= (int)*p * 6) goto L6; else goto L7;
}
}
}
#define ur(i) f[(n + *p * i) / 32] &= m[(n + *p * i) % 32]
ur(0); ur(1); ur(2); ur(3); ur(4); ur(5); ur(6); ur(7);
}
L7: ur(6);
L6: ur(5);
L5: ur(4);
L4: ur(3);
L3: ur(2);
L2: ur(1);
L1: ur(0);
L0:
p2 = sq(*++p);
#undef ur
} while (p2 < st + C * 16);
}
You may be sieving, but what about counting? And an upper limit, so that it can be compared? OMP, like primesieve?
You're stuck because you aren't even counting or comparing against anything but yourself.
I made a segmented sieve with a 30Kb char array. At 2 billion it takes exactly 3 times as long as primesieve, and it works with OMP. So all your bit mapping and unrolling is not measurable.
$ time primesieve 2000000000
Sieve size = 128 KiB
Threads = 8
100%
Seconds: 0.089
Primes: 98222287
real 0m0.094s
user 0m0.655s
sys 0m0.013s
A non-sieve (not even multi-threaded at this number):
$ time primecount 2000000000
98222287
real 0m0.087s
user 0m0.018s
sys 0m0.013s
My simple char sieve:
$ time ./a.out 2000000000
PI (prime count) of
2000000000 (2e+09):
98222287
real 0m0.311s
user 0m2.456s
sys 0m0.004s
"Sweating" on 8 threads. Without a limit and a result, what would 0.311s mean?
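For anyone who wants to reproduce this kind of comparison, below is a rough sketch of such a segmented char sieve. This is not the commenter's actual program; the segment size, output format, and lack of further optimization are illustrative choices. It takes the upper limit on the command line, prints a count that can be checked against primesieve, and the segment loop parallelizes with a single OpenMP pragma (build with something like gcc -O2 -fopenmp -lm; without -fopenmp it simply runs serially):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#define SEG 30000  //segment size in bytes, one char flag per number

int main(int argc, char **argv) {
    unsigned long long limit = argc > 1 ? strtoull(argv[1], 0, 10) : 1000000;
    unsigned long long root = (unsigned long long)sqrt((double)limit);
    while ((root + 1) * (root + 1) <= limit) ++root;  //guard against fp rounding
    //Simple sieve for the base primes up to sqrt(limit).
    char *base = malloc(root + 2);
    memset(base, 1, root + 2);
    base[0] = base[1] = 0;
    for (unsigned long long i = 2; i * i <= root; ++i)
        if (base[i])
            for (unsigned long long j = i * i; j <= root; j += i)
                base[j] = 0;
    unsigned long long count = 0;
    //Each segment [lo, hi] is independent, so the loop parallelizes with a
    //plain reduction on the count.
    #pragma omp parallel for reduction(+:count) schedule(dynamic)
    for (unsigned long long lo = 2; lo <= limit; lo += SEG) {
        char flags[SEG];
        unsigned long long hi = lo + SEG - 1 < limit ? lo + SEG - 1 : limit;
        memset(flags, 1, hi - lo + 1);
        for (unsigned long long p = 2; p <= root; ++p) {
            if (!base[p]) continue;
            unsigned long long start = (lo + p - 1) / p * p;  //first multiple of p >= lo
            if (start < p * p) start = p * p;                 //smaller multiples have a smaller factor
            for (unsigned long long j = start; j <= hi; j += p)
                flags[j - lo] = 0;
        }
        for (unsigned long long j = lo; j <= hi; ++j)
            count += flags[j - lo];
    }
    printf("primes up to %llu: %llu\n", limit, count);
    free(base);
    return 0;
}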
I added a simple benchmark framework to test various approaches. It turns out the unrolling does not improve performance. The reason is probably that modern processors predict the branches of such a simple loop well enough that it runs at full speed when it iterates many times, which is the case for small primes, and the same simple loop that breaks out almost immediately is also predicted correctly once the primes get large enough.
Here is the simplified version that has the same performance on my laptop:
static void sieve_bit_3(unsigned *f, unsigned st, unsigned *pp) {
static const unsigned m[] = {
~(1u << 0), ~(1u << 1), ~(1u << 2), ~(1u << 3), ~(1u << 4),
~(1u << 5), ~(1u << 6), ~(1u << 7), ~(1u << 8), ~(1u << 9),
~(1u << 10), ~(1u << 11), ~(1u << 12), ~(1u << 13), ~(1u << 14),
~(1u << 15), ~(1u << 16), ~(1u << 17), ~(1u << 18), ~(1u << 19),
~(1u << 20), ~(1u << 21), ~(1u << 22), ~(1u << 23), ~(1u << 24),
~(1u << 25), ~(1u << 26), ~(1u << 27), ~(1u << 28), ~(1u << 29),
~(1u << 30), ~(1u << 31)
};
unsigned p = *pp;
unsigned p2 = sq(p);
do {
unsigned n = (p2 > st + p * 2 ? p2 - st : p - st % p + (-(st % p & 1) & p)) / 2;
for (; n < C * 8; n += p) {
f[n / 32] &= m[n % 32];
}
p2 = sq(p = *++pp);
} while (p2 < st + C * 16);
}
And here is a further modified version with fewer tests, which is faster still:
static void sieve_bit_6(unsigned *f, unsigned st, unsigned *pp) {
static const unsigned m[] = {
~(1u << 0), ~(1u << 1), ~(1u << 2), ~(1u << 3), ~(1u << 4),
~(1u << 5), ~(1u << 6), ~(1u << 7), ~(1u << 8), ~(1u << 9),
~(1u << 10), ~(1u << 11), ~(1u << 12), ~(1u << 13), ~(1u << 14),
~(1u << 15), ~(1u << 16), ~(1u << 17), ~(1u << 18), ~(1u << 19),
~(1u << 20), ~(1u << 21), ~(1u << 22), ~(1u << 23), ~(1u << 24),
~(1u << 25), ~(1u << 26), ~(1u << 27), ~(1u << 28), ~(1u << 29),
~(1u << 30), ~(1u << 31)
};
unsigned p, p2;
while (p = *pp++, (p2 = sq(p)) <= st + 2 * p) {
unsigned mod = st % p;
unsigned n = (p - mod + (-(mod & 1) & p)) / 2;
for (; n < C * 8; n += p) {
f[n / 32] &= m[n % 32];
}
}
while (p2 < st + C * 16) {
unsigned n = (p2 - st) / 2;
for (; n < C * 8; n += p) {
f[n / 32] &= m[n % 32];
}
p2 = sq(p = *pp++);
}
}
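On the counting point raised in the comments: with this layout (one bit per odd number), counting a finished segment is just a popcount pass over the flag words. Below is a minimal sketch, assuming the flag words were initialized to all ones before sieving and that the primes below 67, which sieve_bit deliberately skips, were already handled by the other routines; count_segment is a hypothetical helper, not part of the code above:

//Counts the surviving bits of one finished segment, i.e. the odd primes in
//[st, st + C * 16). Assumes the flags started as all ones and that the small
//primes below 67 were sieved out elsewhere.
static unsigned count_segment(const unsigned *f) {
    unsigned count = 0;
    for (unsigned i = 0; i < C / sizeof(unsigned); ++i)  //C / sizeof(unsigned) words per segment
        count += __builtin_popcount(f[i]);               //each set bit is one surviving odd number
    return count;
}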