LNK1169 Error I can't find

LNK1169: one or more multiply defined symbols found.

My project is a parallelized random number generator that uses MPI and a parallel prefix algorithm. I have looked up many solutions for the LNK1169 error. To head it off I made many variables static, and I searched for multiply defined variables but could not find any. I have no header file in which a variable could be defined more than once. I would appreciate it if someone could help me find the error. I am fairly sure it is somewhere in functions.cpp, because everything built correctly before I tried to implement the parallel_prefix function.

Here is the LNK2005 output as well:

LNK2005 "class std::vector >,class std::allocator > > > __cdecl parallel_prefix(class std::vector >,class std::allocator > > >,class std::allocator >,class std::allocator > > > > >,int,int)" (?parallel_prefix@@YA?AV?$vector@V?$vector@HV?$allocator@H@std@@@std@@V?$allocator @V?$vector@HV?$allocator@H@std@@@std@@@2@@std@@V?$vector@V?$vector@V?$vector@HV?$allocator@H@std @@@std@@V?$allocator@V?$vector@HV?$allocator@H@std@@@std@@@2@@std@@V?$allocator@V?$vector@V?$ vector@HV?$allocator@H@std@@@std@@V?$allocator@V?$vector@HV?$allocator@H@std@@@std@@@2@@std@@@2@ @2@HH@Z) 已在 functions.obj RandomNumberGenerator

中定义

Here is my code.

RandomNumberGenerator.cpp

#include "functions.cpp"

int main(int argc, char *argv[])
{
    // Establishes what rank it is, and how many processes are running.
    static int rank, p, n, per_Process;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);
    static vector<int> Broadcast_data;
    n = 100;
    per_Process = n / p;

    // The first and second arguments are constants for number generation, the third is a large prime to mod by, and the fourth is a random seed. x1 is calculated based off x0.
    // All provided by the user except x1.
    // Rank 0 broadcasts the data to all processes.
    if (rank == 0)
    {
        for (static int i = 1; i < 5; i++)
        {
            Broadcast_data.push_back(std::atoi(argv[i]));
        }
        Broadcast_data.push_back(std::atoi(argv[1]) * std::atoi(argv[4]) % std::atoi(argv[3]));

        // NOTE: THIS PUSH BACK IS HOW MANY RANDOM NUMBERS WILL BE GENERATED
        Broadcast_data.push_back(n);
        cout << "Rank " << rank << " Broadcast Data: ";
        for (static int i = 0; i < 6; i++)
        {
            cout << Broadcast_data[i] << " ";
        }
        cout << endl;
    }
    else
    {
        Broadcast_data.resize(6);
    }
    MPI_Bcast(Broadcast_data.data(), 6, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);

    // Initialize an array of n/p values at every process.  Each of the n/p values is the matrix M.
    // M is this 2 dimensional array:
    // [ a 1 ]
    // [ b 0 ]
    static vector<vector<int>> M;
    M.resize(2);
    M[0].resize(2);
    M[1].resize(2);
    M[0][0] = Broadcast_data[0];
    M[0][1] = Broadcast_data[1];
    M[1][0] = 1;
    M[1][1] = 0;

    // Now we must initialize the array of these M values.  Notation might get complex here
    // as we are dealing with 3D arrays.
    static vector<vector<vector<int>>> M_values;
    M_values.resize(per_Process);
    for (static int i = 0; i < per_Process; i++)
    {
        M_values.push_back(M);
    }

    // Now we are ready for the parallel prefix operation.  Note that the operator here
    // is matrix multiplication.
    static vector<vector<int>> prefix;
    prefix = parallel_prefix(M_values, rank, p);

    MPI_Finalize();

    return 0;
}

functions.cpp

#include <mpi.h>
#include <iostream>
#include <cstdlib>
#include <string>
#include <vector>
#include <time.h>

using namespace std;

// This is parallel prefix with the operator being matrix multiplication
vector<vector<int>> parallel_prefix(vector<vector<vector<int>>> Matrices, int rank, int p)
{
    // The first step is a local multiplication of all M values.
    // In a matrix represented by:
    // [ a b ]
    // [ c d ]
    // The new matrix will be this:
    // [ a^2+bc ab+bd ]
    // [ ca+dc cb+d^2 ]
    // So the first step will be to complete this operation once for every matrix M in M_values

    static vector<vector<int>> local_sum;
    local_sum = Matrices[0];
    for (static int i = 1; i < Matrices.size(); i++)
    {
        vector<vector<int>> temp_vector;
        temp_vector = local_sum;
        temp_vector[0][0] = local_sum[0][0] * Matrices[i][0][0] + local_sum[1][0] * Matrices[i][0][1];
        temp_vector[0][1] = local_sum[0][1] * Matrices[i][0][0] + local_sum[1][1] * Matrices[i][0][1];
        temp_vector[1][0] = local_sum[0][0] * Matrices[i][1][0] + local_sum[0][1] * Matrices[i][1][1];
        temp_vector[1][1] = local_sum[0][1] * Matrices[i][1][0] + local_sum[1][1] * Matrices[i][1][1];

        local_sum = temp_vector;
    }
    // Now that all the local sums have been computed we can start step 2: communication.

    // Determine how many steps it will take
    int steps = 0;
    while (int j = 1 < p)
    {
        j *= 2;
        steps++;
    }
    while (int k = 0 < steps)
    {
        // First determine the rank's mate.
        static int mate;
        mate = rank | (1u << steps);

        // Now we send the local sum to mate, and receive our mate's local sum.
        // First modify the local sum vector to a vector that can be sent.
        // Send vector syntax is [ a c b d ]
        static vector<int> send_vector, recv_vector;
        send_vector.resize(4);
        recv_vector.resize(4);
        send_vector[0] = local_sum[0][0];
        send_vector[1] = local_sum[0][1];
        send_vector[2] = local_sum[1][0];
        send_vector[3] = local_sum[1][1];

        // Send the vector to your mate, and receive a vector from your mate.
        static MPI_Status status;
        MPI_Send(send_vector.data(), 4, MPI_INT, mate, 0, MPI_COMM_WORLD);
        MPI_Recv(recv_vector.data(), 4, MPI_INT, mate, 1, MPI_COMM_WORLD, &status);

        // Update the local sum if your mate rank is lower than your rank.
        if (mate < rank)
        {
            static vector<vector<int>> temp_vector;
            temp_vector = local_sum;
            temp_vector[0][0] = local_sum[0][0] * recv_vector[0] + local_sum[1][0] * recv_vector[1];
            temp_vector[0][1] = local_sum[0][1] * recv_vector[0] + local_sum[1][1] * recv_vector[1];
            temp_vector[1][0] = local_sum[0][0] * recv_vector[2] + local_sum[0][1] * recv_vector[3];
            temp_vector[1][1] = local_sum[0][1] * recv_vector[2] + local_sum[1][1] * recv_vector[3];

            local_sum = temp_vector;
        }
        MPI_Barrier(MPI_COMM_WORLD);
        k++;
        // After completion of this loop the local sum is the parallel prefix output for each process.
    }

    return local_sum;
}

You #include functions.cpp in RandomNumberGenerator.cpp, and functions.cpp is almost certainly also compiled as part of your project. That compiles the contents of functions.cpp twice, so the linker sees two definitions of parallel_prefix, which is exactly what LNK2005 and LNK1169 report.

Don't include functions.cpp in main. Create a functions.h header that declares the functions from it, include that header instead, and let the build compile functions.cpp as its own translation unit.
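
A minimal sketch of that split, assuming parallel_prefix is the only thing RandomNumberGenerator.cpp needs from functions.cpp (the header name and include guard are illustrative):

functions.h

// Declarations only; the single definition stays in functions.cpp.
#ifndef FUNCTIONS_H
#define FUNCTIONS_H

#include <vector>

std::vector<std::vector<int>> parallel_prefix(std::vector<std::vector<std::vector<int>>> Matrices, int rank, int p);

#endif

Then add #include "functions.h" at the top of functions.cpp, and in RandomNumberGenerator.cpp replace #include "functions.cpp" with:

// RandomNumberGenerator.cpp currently picks up <mpi.h>, <iostream>, <cstdlib>, <vector> and
// using namespace std through functions.cpp, so it needs them directly once that include is gone.
#include <mpi.h>
#include <iostream>
#include <cstdlib>
#include <vector>
#include "functions.h"

using namespace std;

With this layout each .cpp file is compiled once into its own object file, RandomNumberGenerator.obj only sees the declaration, and the linker finds exactly one definition of parallel_prefix in functions.obj.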