c++ - difference between runtime timing function and command line time
I have the following code, using the timing function shown and the libbsc library from https://github.com/IlyaGrebnov/libbsc.
#include <vector>
#include <iostream>
#include <random>
#include <cassert>
#include <sys/time.h>
#include "libbsc/libbsc/libbsc.h"

typedef unsigned long long timestamp_t;

static timestamp_t get_timestamp()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_usec + (timestamp_t)now.tv_sec * 1000000;
}

int main() {
    int min = 0;
    int max = 1800;
    std::vector<int> rand_vector;
    int bsc_length;
    int bsc_status;
    double secs_bsc;
    unsigned char* bsc_out_buffer;

    // generate random values
    for (int i = 1; i < 262144; ++i)
        rand_vector.push_back(min + (rand() % (int)(max - min + 1))); // 262144 random integers

    char* rand_buffer = reinterpret_cast<char*>(&rand_vector[0]);
    int rand_length = sizeof(int) * rand_vector.size();
    bsc_out_buffer = (unsigned char*) malloc(rand_length + LIBBSC_HEADER_SIZE);
    const unsigned char* bsc_in_buffer = reinterpret_cast<const unsigned char*>(rand_buffer);

    bsc_status = bsc_init(LIBBSC_FEATURE_FASTMODE);
    if (bsc_status == LIBBSC_NO_ERROR) {
        timestamp_t t0_bsc = get_timestamp();
        bsc_length = bsc_compress(bsc_in_buffer,              // inbuffer
                                  bsc_out_buffer,             // outbuffer
                                  rand_length,                // length of inbuffer
                                  LIBBSC_DEFAULT_LZPHASHSIZE, // int lzpHashSize
                                  LIBBSC_DEFAULT_LZPMINLEN,   // int lzpMinLen
                                  LIBBSC_DEFAULT_BLOCKSORTER, // int blockSorter
                                  LIBBSC_CODER_QLFC_ADAPTIVE, // int coder
                                  LIBBSC_FEATURE_FASTMODE     // int features
                                  );
        timestamp_t t1_bsc = get_timestamp();
        secs_bsc = (t0_bsc - t1_bsc) / 1000000.0L;
    }
    else std::cout << "ERROR in bsc_init: " << bsc_status << std::endl;

    std::cout << std::fixed << "bsc_compress runtime: " << secs_bsc << std::endl;
    std::cout << "bsc_compress size: " << bsc_length << std::endl;

    unsigned char* bsc_assert_buffer;
    bsc_assert_buffer = (unsigned char*) malloc(rand_length);
    bsc_status = bsc_decompress(bsc_out_buffer, bsc_length, bsc_assert_buffer, rand_length, LIBBSC_FEATURE_FASTMODE);

    int* uncompress_values = (int*)bsc_assert_buffer;
    for (int i = 0; i < rand_vector.size(); ++i) {
        assert(uncompress_values[i] == rand_vector[i]);
    }
}
I compile it with
g++ --std=c++11 test_bsc.cpp libbsc/libbsc.a -o bsc_test
and run it with
time ./bsc_test
The output is
bsc_compress runtime: 18446744073709.355469
bsc_compress size: 357178
real 0m0.392s
user 0m0.384s
sys 0m0.008s
Can anyone explain the difference to me? Is the resolution of the timing function not good enough?
If I use zlib compression instead, I get something like 1.24294 as the runtime, with the same timing function and vector size.
Regards, tiom
Your code computes the time difference between two points that you choose. The command-line time measures the total time taken to execute the whole program.
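In other words, the in-code number only covers whatever you bracket between the two timestamps. As an aside, a minimal sketch of the same bracketing with std::chrono::steady_clock (not part of the original code) sidesteps the pitfall below, because chrono duration arithmetic is signed:

#include <chrono>
#include <iostream>

int main() {
    auto t0 = std::chrono::steady_clock::now();
    // ... only the work between the two timestamps is measured,
    //     e.g. the bsc_compress call ...
    auto t1 = std::chrono::steady_clock::now();
    // duration<double> is signed: a swapped t0/t1 shows up as an
    // obviously negative value rather than a huge unsigned one
    std::chrono::duration<double> secs = t1 - t0;
    std::cout << "elapsed: " << secs.count() << " s\n";
}

Now, about the value the question's code printed: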
bsc_compress runtime: 18446744073709.355469
There are a few magic numbers that you need to have memorized as a programmer. Powers of two rank at the top; know 15, 16, 31, 32, 63, 64 by heart. The last one is a match here, and it helps you find the bug: you got a negative result that was interpreted as an unsigned 64-bit value. The fix:
secs_bsc = (t1_bsc - t0_bsc) / 1000000.0L;
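To see where the huge number comes from, here is a small standalone demonstration. The timestamp values are made up; only the 196147 µs gap between them is chosen so that the wrap-around reproduces the output above:

#include <iostream>

typedef unsigned long long timestamp_t;

int main() {
    timestamp_t t0 = 1000000;   // hypothetical timestamps in microseconds,
    timestamp_t t1 = 1196147;   // t1 being 196147 us later than t0
    // wrong order: the negative difference wraps around modulo 2^64
    double secs = (t0 - t1) / 1000000.0L;
    // prints 18446744073709.355469, the bogus runtime from the question
    std::cout << std::fixed << secs << std::endl;
}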
Fwiw: you cannot compare the two results very well anyway; the OS measurement also includes the time needed to load the executable and start the CRT.
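One way to make that overhead visible (a sketch, not part of the original answer): bracket all of main() with the same timing calls and compare the printed value against the real figure reported by time; the gap is roughly the cost of loading the executable and starting the C runtime.

#include <sys/time.h>
#include <cstdio>

int main() {
    timeval t0, t1;
    gettimeofday(&t0, NULL);
    // ... the program's actual work would go here ...
    gettimeofday(&t1, NULL);
    // elapsed wall-clock time inside main(), in seconds
    double secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_usec - t0.tv_usec) / 1e6;
    printf("inside main: %f s\n", secs);
}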