curl_multi_exec: some images downloaded are missing some data / stream incomplete
I've implemented a PHP function that checks and downloads a large amount of images (> 1'000), passed to it as an array, using PHP's curl_multi_init() method.
After reworking it a few times, because I was getting 0-byte files and the like, I now have a solution that downloads all images - but some of the downloaded image files are incomplete.
It looks to me as if I'm calling file_put_contents() "too early", meaning before the data of some images has been fully received via curl_multi_exec().
Unfortunately, I couldn't find any similar question, nor any search result covering my case. I need to use curl_multi_exec, but I don't want to retrieve and save the images using the cURL option CURLOPT_FILE.
Hopefully someone can point out what I'm missing and why some of the images end up corrupted when saved locally.
Here are a few examples of the corrupted images that were retrieved:
This is a sample array I pass to the multi-cURL function:
$curl_httpresources = [
[ 'http://www.gravatar.com/avatar/example?d=mm&r=x&s=427'
,'/srv/www/data/images/1_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=identicon&r=x&s=427'
,'/srv/www/data/images/2_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=monsterid&r=x&s=427'
,'/srv/www/data/images/3_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=wavatar&r=x&s=427'
,'/srv/www/data/images/4_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=retro&r=x&s=427'
,'/srv/www/data/images/5_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=mm&r=x&s=427'
,'/srv/www/data/images/6_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=identicon&r=x&s=427'
,'/srv/www/data/images/7_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=monsterid&r=x&s=427'
,'/srv/www/data/images/8_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=wavatar&r=x&s=427'
,'/srv/www/data/images/9_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=retro&r=x&s=427'
,'/srv/www/data/images/10_unsplash.jpg' ],
];
My multi-cURL PHP function
The function I'm using right now kind of "works", apart from some partially downloaded files - here's the code:
function cURLfetch(array $resources)
{
/** Disable PHP timelimit, because this could take a while... */
set_time_limit(0);
/** Validate the $resources Array (not empty, null, or alike) */
$resources_num = count($resources);
if ( empty($resources) || $resources_num <= 0 ) return false;
/** Callback-Function for writing data to file */
$callback = function($resource, $filepath)
{
file_put_contents($filepath, $resource);
/** For Debug only: output <img>-Tag with saved $resource */
printf('<img src="%s"><br>', str_replace('/srv/www', '', $filepath));
};
/**
* Initialize CURL process for handling multiple parallel requests
*/
$curl_instance = curl_multi_init();
$curl_multi_exec_active = null;
$curl_request_options = [
CURLOPT_USERAGENT => 'PHP-Script/1.0 (+https://website.com/)',
CURLOPT_TIMEOUT => 10,
CURLOPT_FOLLOWLOCATION => true,
CURLOPT_VERBOSE => false,
CURLOPT_RETURNTRANSFER => true,
];
/**
* Looping through all $resources
* $resources[$i][0] = HTTP resource
* $resources[$i][1] = Target Filepath
*/
for ($i = 0; $i < $resources_num; $i++)
{
$curl_requests[$i] = curl_init($resources[$i][0]);
curl_setopt_array($curl_requests[$i], $curl_request_options);
curl_multi_add_handle($curl_instance, $curl_requests[$i]);
}
do {
try {
$curl_execute = curl_multi_exec($curl_instance, $curl_multi_exec_active);
} catch (Exception $e) {
error_log($e->getMessage());
}
} while ($curl_execute == CURLM_CALL_MULTI_PERFORM);
/** Wait until data arrives on all sockets */
$h = 0; // initialise a counter
while ($curl_multi_exec_active && $curl_execute == CURLM_OK)
{
if (curl_multi_select($curl_instance) != -1)
{
do {
$curl_data = curl_multi_exec($curl_instance, $curl_multi_exec_active);
$curl_done = curl_multi_info_read($curl_instance);
/** Check if there is data... */
if ($curl_done['handle'] !== NULL)
{
/** Continue ONLY if HTTP statuscode was OK (200) */
$info = curl_getinfo($curl_done['handle']);
if ($info['http_code'] == 200)
{
if (!empty(curl_multi_getcontent($curl_requests[$h]))) {
/** Curl request successful. Process data using the callback function. */
$callback(curl_multi_getcontent($curl_requests[$h]), $resources[$h][1]);
}
$h++; // count up
}
}
} while ($curl_data == CURLM_CALL_MULTI_PERFORM);
}
}
/** Close all $curl_requests */
foreach($curl_requests as $request) {
curl_multi_remove_handle($curl_instance, $request);
}
curl_multi_close($curl_instance);
return true;
}
/** Start fetching images from an Array */
cURLfetch($curl_httpresources);
Any help is greatly appreciated, thanks a lot!
I ended up querying all >1'000 images with regular cURL requests in a classic loop and downloading those that returned "HTTP 200 OK". My initial concern that the server might cut the connection because it misinterprets the requests as a DDoS attack turned out not to be an issue, which is why this approach worked fine for my case.
This is the final function with regular cURL requests that I'm using:
function cURLfetchUrl($url, $save_as_file)
{
/** Validate $url & $save_as_file (not empty, null, or alike) */
if ( empty($url) || is_numeric($url) ) return false;
if ( empty($save_as_file) || is_numeric($save_as_file) ) return false;
/** Disable PHP timelimit, because this could take a while... */
set_time_limit(0);
try {
/**
* Set cURL options to be passed to a single request
*/
$curl_request_options = [
CURLOPT_USERAGENT => 'PHP-Script/1.0 (+https://website.com/)',
CURLOPT_TIMEOUT => 5,
CURLOPT_FOLLOWLOCATION => true,
CURLOPT_RETURNTRANSFER => true,
];
/** Initialize & execute cURL-Request */
$curl_instance = curl_init($url);
curl_setopt_array($curl_instance, $curl_request_options);
$curl_data = curl_exec($curl_instance);
$curl_done = curl_getinfo($curl_instance);
/** cURL request successful */
if ($curl_done['http_code'] == 200)
{
/** Open a new file handle, write the file & close the file handle */
if (file_put_contents($save_as_file, $curl_data) !== false) {
// logging if file_put_contents was OK
} else {
// logging if file_put_contents FAILED
}
}
/** Close the $curl_instance */
curl_close($curl_instance);
return true;
} catch (Exception $e) {
error_log($e->getMessage());
return false;
}
}
And to execute it:
$curl_httpresources = [
[ 'http://www.gravatar.com/avatar/example?d=mm&r=x&s=427'
,'/srv/www/data/images/1_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=identicon&r=x&s=427'
,'/srv/www/data/images/2_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=monsterid&r=x&s=427'
,'/srv/www/data/images/3_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=wavatar&r=x&s=427'
,'/srv/www/data/images/4_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=retro&r=x&s=427'
,'/srv/www/data/images/5_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=mm&r=x&s=427'
,'/srv/www/data/images/6_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=identicon&r=x&s=427'
,'/srv/www/data/images/7_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=monsterid&r=x&s=427'
,'/srv/www/data/images/8_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=wavatar&r=x&s=427'
,'/srv/www/data/images/9_unsplash.jpg' ],
[ 'http://www.gravatar.com/avatar/example?d=retro&r=x&s=427'
,'/srv/www/data/images/10_unsplash.jpg' ],
];
/** cURL all requests from the $curl_httpresources Array */
if (count($curl_httpresources) > 0)
{
foreach ($curl_httpresources as $resource)
{
cURLfetchUrl($resource[0], $resource[1]);
}
}
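A note on why this works: because CURLOPT_RETURNTRANSFER keeps the whole response body in memory, file_put_contents() only runs after curl_exec() has returned, i.e. after the transfer has completely finished, and only for responses with HTTP status 200 - which is probably why this variant doesn't produce partial files. Be aware, though, that curl_exec() can also return false on timeouts or connection errors, and cURLfetchUrl() still returns true in that case; checking curl_errno($curl_instance) after curl_exec() would be a reasonable addition if you need to tell failed downloads apart in the loop.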
Still, if anyone has an idea how to properly retrieve the file data stream with curl_multi, that would be great, since my answer to the initial question only shows a different approach rather than fixing the original one.
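For completeness: the incomplete files in the original cURLfetch() most likely come from the separate $h counter. curl_multi_info_read() hands back handles in the order they finish, which doesn't have to match the order they were added, so curl_multi_getcontent($curl_requests[$h]) can end up reading a transfer that isn't done yet. Below is a minimal, untested sketch of one way to avoid the counter entirely by attaching the target path to each handle with CURLOPT_PRIVATE and reading it back with CURLINFO_PRIVATE once that particular handle is reported as finished (the function name cURLfetchMulti is made up for illustration; the request options mirror the ones used above):
function cURLfetchMulti(array $resources)
{
    /** Disable PHP timelimit, because this could take a while... */
    set_time_limit(0);
    if (empty($resources)) return false;

    $curl_instance = curl_multi_init();

    /** Add one handle per resource; the target filepath travels with the handle */
    foreach ($resources as $resource)
    {
        $request = curl_init($resource[0]);
        curl_setopt_array($request, [
            CURLOPT_USERAGENT      => 'PHP-Script/1.0 (+https://website.com/)',
            CURLOPT_TIMEOUT        => 10,
            CURLOPT_FOLLOWLOCATION => true,
            CURLOPT_RETURNTRANSFER => true,
            CURLOPT_PRIVATE        => $resource[1], // remember where this download should be saved
        ]);
        curl_multi_add_handle($curl_instance, $request);
    }

    do {
        $status = curl_multi_exec($curl_instance, $active);
        if ($active && curl_multi_select($curl_instance) === -1) {
            usleep(100000); // avoid busy-looping if select() reports a failure
        }
        /** Drain every transfer that has finished so far */
        while (($done = curl_multi_info_read($curl_instance)) !== false)
        {
            $request  = $done['handle'];
            $filepath = curl_getinfo($request, CURLINFO_PRIVATE);
            /** Only now is the body of THIS handle guaranteed to be complete */
            if ($done['result'] === CURLE_OK
                && curl_getinfo($request, CURLINFO_HTTP_CODE) == 200)
            {
                file_put_contents($filepath, curl_multi_getcontent($request));
            }
            curl_multi_remove_handle($curl_instance, $request);
            curl_close($request);
        }
    } while ($active && $status == CURLM_OK);

    curl_multi_close($curl_instance);
    return true;
}
The key difference to the original function is that the file is written for exactly the handle that curl_multi_info_read() reported as done, rather than for whatever $curl_requests[$h] happens to point at.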