Error when I push back a pair in C++
I have been trying to compile my program, which is supposed to push back a pair of a string and a float into a vector:
typedef std::pair<string, float> Prediction;
std::vector<Prediction> predictions;
for ( int i = 0 ; i < output.size(); i++ ) {
std::vector<int> maxN = Argmax(output[i], 1);
int idx = maxN[0];
predictions.push_back(std::make_pair(labels_[idx], output[idx]));
}
return predictions;
However, every time I try to compile it, I get this error:
error: no matching member function for call to 'push_back'
predictions.push_back(std::make_pair(labels_[idx], output[idx]));
I also get some other notes, such as
candidate function not viable: no known conversion from 'pair<[...],
typename __make_pair_return >
&>::type>' to 'const pair<[...], float>' for 1st argument
_LIBCPP_INLINE_VISIBILITY void push_back(const_reference __x);
and
candidate function not viable: no known conversion from 'pair<[...],
typename __make_pair_return >
&>::type>' to 'pair<[...], float>' for 1st argument
_LIBCPP_INLINE_VISIBILITY void push_back(value_type&& __x);
I have been rewriting things and modifying my function, but I cannot figure out why this error persists. Does anyone know what I can do to fix it?
In case it helps, here is the code in context. The header file:
/**
* Classification System
*/
#ifndef __CLASSIFIER_H__
#define __CLASSIFIER_H__
#include <caffe/caffe.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <algorithm>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
#include <vector>
using namespace caffe; // NOLINT(build/namespaces)
using std::string;
/* Pair (label, confidence) representing a prediction. */
typedef std::pair<string, float> Prediction;
class Classifier {
public:
Classifier(const string& model_file,
const string& trained_file,
const string& label_file);
std::vector< Prediction > Classify(const std::vector<cv::Mat>& img);
private:
std::vector< std::vector<float> > Predict(const std::vector<cv::Mat>& img, int nImages);
void WrapInputLayer(std::vector<cv::Mat>* input_channels, int nImages);
void Preprocess(const std::vector<cv::Mat>& img,
std::vector<cv::Mat>* input_channels, int nImages);
private:
shared_ptr<Net<float> > net_;
cv::Size input_geometry_;
int num_channels_;
std::vector<string> labels_;
};
#endif /* __CLASSIFIER_H__ */
The class file:
#define CPU_ONLY
#include "Classifier.h"
using namespace caffe; // NOLINT(build/namespaces)
using std::string;
Classifier::Classifier(const string& model_file,
const string& trained_file,
const string& label_file) {
#ifdef CPU_ONLY
Caffe::set_mode(Caffe::CPU);
#else
Caffe::set_mode(Caffe::GPU);
#endif
/* Load the network. */
net_.reset(new Net<float>(model_file, TEST));
net_->CopyTrainedLayersFrom(trained_file);
CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";
Blob<float>* input_layer = net_->input_blobs()[0];
num_channels_ = input_layer->channels();
CHECK(num_channels_ == 3 || num_channels_ == 1)
<< "Input layer should have 1 or 3 channels.";
input_geometry_ = cv::Size(input_layer->width(), input_layer->height());
/* Load labels. */
std::ifstream labels(label_file.c_str());
CHECK(labels) << "Unable to open labels file " << label_file;
string line;
while (std::getline(labels, line))
labels_.push_back(string(line));
Blob<float>* output_layer = net_->output_blobs()[0];
CHECK_EQ(labels_.size(), output_layer->channels())
<< "Number of labels is different from the output layer dimension.";
}
static bool PairCompare(const std::pair<float, int>& lhs,
const std::pair<float, int>& rhs) {
return lhs.first > rhs.first;
}
/* Return the indices of the top N values of vector v. */
static std::vector<int> Argmax(const std::vector<float>& v, int N) {
std::vector<std::pair<float, int> > pairs;
for (size_t i = 0; i < v.size(); ++i)
pairs.push_back(std::make_pair(v[i], i));
std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(), PairCompare);
std::vector<int> result;
for (int i = 0; i < N; ++i)
result.push_back(pairs[i].second);
return result;
}
std::vector<Prediction> Classifier::Classify(const std::vector<cv::Mat>& img) {
std::vector< std::vector<float> > output = Predict(img, img.size());
std::vector<Prediction> predictions;
for ( int i = 0 ; i < output.size(); i++ ) {
std::vector<int> maxN = Argmax(output[i], 1);
int idx = maxN[0];
predictions.push_back(std::make_pair(labels_[idx], output[idx]));
}
return predictions;
}
std::vector< std::vector<float> > Classifier::Predict(const std::vector<cv::Mat>& img, int nImages) {
Blob<float>* input_layer = net_->input_blobs()[0];
input_layer->Reshape(nImages, num_channels_,
input_geometry_.height, input_geometry_.width);
/* Forward dimension change to all layers. */
net_->Reshape();
std::vector<cv::Mat> input_channels;
WrapInputLayer(&input_channels, nImages);
Preprocess(img, &input_channels, nImages);
net_->ForwardPrefilled();
/* Copy the output layer to a std::vector */
Blob<float>* output_layer = net_->output_blobs()[0];
std::vector <std::vector<float> > ret;
for (int i = 0; i < nImages; i++) {
const float* begin = output_layer->cpu_data() + i*output_layer->channels();
const float* end = begin + output_layer->channels();
ret.push_back( std::vector<float>(begin, end) );
}
return ret;
}
/* Wrap the input layer of the network in separate cv::Mat objects
* (one per channel). This way we save one memcpy operation and we
* don't need to rely on cudaMemcpy2D. The last preprocessing
* operation will write the separate channels directly to the input
* layer. */
void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels, int nImages) {
Blob<float>* input_layer = net_->input_blobs()[0];
int width = input_layer->width();
int height = input_layer->height();
float* input_data = input_layer->mutable_cpu_data();
for (int i = 0; i < input_layer->channels()* nImages; ++i) {
cv::Mat channel(height, width, CV_32FC1, input_data);
input_channels->push_back(channel);
input_data += width * height;
}
}
void Classifier::Preprocess(const std::vector<cv::Mat>& img,
std::vector<cv::Mat>* input_channels, int nImages) {
for (int i = 0; i < nImages; i++) {
vector<cv::Mat> channels;
cv::split(img[i], channels);
for (int j = 0; j < channels.size(); j++){
channels[j].copyTo((*input_channels)[i*num_channels_[0]+j]);
}
}
}
Thanks a lot!
typedef std::pair<string, float> Prediction;
std::vector<Prediction> predictions;
std::vector< std::vector<float> > output = Predict(img, img.size());
make_pair expects a string and a float, but output[idx] gives you a vector of floats. You only need a single float here: output[i][idx].
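For illustration, here is how the loop in Classify could look with that one-index fix applied. This is a minimal sketch based on the code above, assuming output[i] holds the per-class scores for image i as returned by Predict:

std::vector<Prediction> predictions;
for (size_t i = 0; i < output.size(); ++i) {
    std::vector<int> maxN = Argmax(output[i], 1);  // index of the top score for image i
    int idx = maxN[0];
    // output[i][idx] is a single float (the confidence), so the pair now matches
    // Prediction = std::pair<string, float> and push_back has a viable overload.
    predictions.push_back(std::make_pair(labels_[idx], output[i][idx]));
}
return predictions;

Using size_t for the loop index also avoids the signed/unsigned comparison warning against output.size(), but that part is optional.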