Boost TCP client to connect to multiple servers

I want my TCP client to connect to multiple servers (each with its own IP and port).
I am using async_connect. I can connect to the different servers successfully, but subsequent reads/writes fail because the tcp::socket object corresponding to the server is no longer available.
Can you suggest how I should store each server's socket in some data structure? I tried saving the IP and socket in a std::map, but the first server's socket object was no longer valid in memory and the application crashed.
I also tried making the socket static, but that did not help either.

Please help!!

Also, I hope I am logically correct in making a single TCP client connect to two different servers. I am sharing my simplified header and cpp files below.

class TCPClient: public Socket
{
public:
    TCPClient(boost::asio::io_service& io_service,
        boost::asio::ip::tcp::endpoint ep);
    virtual ~TCPClient();
    void Connect(boost::asio::ip::tcp::endpoint ep, boost::asio::io_service &ioService, void (Comm::*SaveClientDetails)(std::string,void*),
        void *pClassInstance);
    
    void TransmitData(const INT8 *pi8Buffer);
    void HandleWrite(const boost::system::error_code& err,
        size_t szBytesTransferred);
    void HandleConnect(const boost::system::error_code &err, 
        void (Comm::*SaveClientDetails)(std::string,void*),
        void *pClassInstance, std::string sIPAddr);
    static tcp::socket* CreateSocket(boost::asio::io_service &ioService)    
        {   return new tcp::socket(ioService); }
    static tcp::socket *mSocket;
private:
    std::string sMsgRead;
    INT8 i8Data[MAX_BUFFER_LENGTH];
    std::string sMsg;
    boost::asio::deadline_timer mTimer;
};

tcp::socket* TCPClient::mSocket = NULL;

TCPClient::TCPClient(boost::asio::io_service &ioService,
        boost::asio::ip::tcp::endpoint ep) :
        mTimer(ioService)
{
}

void TCPClient::Connect(boost::asio::ip::tcp::endpoint ep, 
        boost::asio::io_service &ioService, 
        void (Comm::*SaveServerDetails)(std::string,void*),
        void *pClassInstance)
{
    mSocket = CreateSocket(ioService);
    std::string sIPAddr = ep.address().to_string();
    /* To send connection request to server*/
    mSocket->async_connect(ep,boost::bind(&TCPClient::HandleConnect, this,
            boost::asio::placeholders::error, SaveServerDetails,
            pClassInstance, sIPAddr));
}

void TCPClient::HandleConnect(const boost::system::error_code &err,
        void (Comm::*SaveServerDetails)(std::string,void*),
        void *pClassInstance, std::string sIPAddr)
{
    if (!err)
    {
        Comm* pInstance = (Comm*) pClassInstance;
        if (NULL == pInstance) 
        {
            return;
        }
        (pInstance->*SaveServerDetails)(sIPAddr,(void*)(mSocket));
    }
    else
    {
        return;
    }
}

void TCPClient::TransmitData(const INT8 *pi8Buffer)
{
    sMsg = pi8Buffer;
    if (sMsg.empty()) 
    {
        return;
    }
    mSocket->async_write_some(boost::asio::buffer(sMsg, MAX_BUFFER_LENGTH),
            boost::bind(&TCPClient::HandleWrite, this,
                    boost::asio::placeholders::error,
                    boost::asio::placeholders::bytes_transferred));
}

void TCPClient::HandleWrite(const boost::system::error_code &err,
        size_t szBytesTransferred) 
{
    if (!err) 
    {
        std::cout << "Data written to TCP Client port! ";
    } 
    else 
    {
        return;
    }
}

You seem to know your problem: the socket object is not available. That is 100% by choice. You chose to make the socket static, so of course there is only one instance.
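
As a minimal sketch of the direct fix you asked about (all names here are hypothetical, and I am assuming a plain io_context): give each server its own socket and keep it alive in a container, for example a map keyed by the peer IP:

#include <boost/asio.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <string>

using boost::asio::ip::tcp;

// Hypothetical sketch: one live socket per server, each kept alive by
// shared ownership while asynchronous work still references it.
class MultiClient {
    boost::asio::io_context&                            _io;
    std::map<std::string, std::shared_ptr<tcp::socket>> _sockets;

  public:
    explicit MultiClient(boost::asio::io_context& io) : _io(io) {}

    void Connect(tcp::endpoint ep) {
        auto sock = std::make_shared<tcp::socket>(_io);
        _sockets[ep.address().to_string()] = sock; // one entry per server
        sock->async_connect(ep, [sock, ep](boost::system::error_code ec) {
            // capturing `sock` keeps the socket alive until the handler runs
            std::cout << "Connect to " << ep << ": " << ec.message() << "\n";
        });
    }

    std::shared_ptr<tcp::socket> Find(std::string const& sIPAddr) {
        auto it = _sockets.find(sIPAddr);
        return it == _sockets.end() ? nullptr : it->second;
    }
};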

Also, I hope I am logically correct in making a single TCP client connect to 2 different servers.

That does not seem right to me. You could redefine "client" to be something that maintains multiple TCP connections. In that case, you want at least a container of tcp::socket objects to hold them (or, you know, Connection objects that each contain a tcp::socket).
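
For example, a hedged sketch of that shape (hypothetical names, not the demo below): a Connection type that owns its tcp::socket plus per-connection state, held by the client in a simple container:

#include <boost/asio.hpp>
#include <memory>
#include <string>
#include <vector>

using boost::asio::ip::tcp;

// Sketch only: each Connection owns its own socket and per-connection state
struct Connection {
    explicit Connection(boost::asio::io_context& io) : socket(io) {}
    tcp::socket socket;
    std::string outbox; // buffers/queues live here, not in statics
};

class TCPClient {
    boost::asio::io_context&                 _io;
    std::vector<std::shared_ptr<Connection>> _connections;

  public:
    explicit TCPClient(boost::asio::io_context& io) : _io(io) {}

    void Add(tcp::endpoint ep) {
        auto conn = std::make_shared<Connection>(_io);
        _connections.push_back(conn);
        conn->socket.async_connect(ep, [conn](boost::system::error_code ec) {
            if (!ec) { /* start the read/write chains on conn->socket */ }
        });
    }
};

The key point in both sketches is ownership: something must keep each socket alive for as long as asynchronous operations reference it.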

Bonus: Demo

For fun and glory, here is what I think you should be looking for.

Notes:

  • No more new and delete
  • No more void* and reinterpret casts (!!!)
  • Less manual buffer sizing/handling
  • No more bind
  • Buffer lifetimes are guaranteed for the duration of the corresponding async operations
  • A message queue per connection
  • Connections run on a strand, so access to shared state is properly synchronized in multi-threaded environments
  • I added a maximum idle-time timeout to each connection; it also limits the time any single async operation (connect/write) may take. I assume you wanted something like that because (a) it is very common and (b) your question code contains an unused deadline_timer

Note the technique of using shared pointers to let Comm manage its own lifetime (isolated in the sketch below). Also note that _socket and _outbox are owned by the individual Comm instance.
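
Here is that lifetime idiom in isolation, as a minimal sketch; the Session name is hypothetical (the demo's Comm plays this role), and I assume the executor is supplied from outside:

#include <boost/asio.hpp>
#include <memory>

using boost::asio::ip::tcp;

// Every pending handler holds a shared_ptr copy ("self"), so the object
// cannot be destroyed while an async operation is in flight.
class Session : public std::enable_shared_from_this<Session> {
    tcp::socket   _socket;
    tcp::endpoint _ep;

  public:
    Session(boost::asio::any_io_executor ex, tcp::endpoint ep)
        : _socket(ex), _ep(ep) {}

    void Start() {
        _socket.async_connect(
            _ep, [this, self = shared_from_this()](boost::system::error_code ec) {
                if (!ec) { /* chain the next operation, capturing self again */ }
                // once the last pending handler completes and no external
                // owner keeps a shared_ptr, ~Session runs automatically
            });
    }
};

Usage would be along the lines of std::make_shared<Session>(ex, ep)->Start(); the temporary shared_ptr can go out of scope immediately.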

Live On Coliru

#include <boost/asio.hpp>
#include <deque>
#include <iostream>

using INT8 = char;
using boost::asio::ip::tcp;
using boost::system::error_code;
//using SaveFunc = std::function<void(std::string, void*)>; // TODO abolish void*
using namespace std::chrono_literals;
using duration = std::chrono::high_resolution_clock::duration;

static inline constexpr size_t MAX_BUFFER_LENGTH = 1024;

using Handle  = std::weak_ptr<class Comm>;

class Comm : public std::enable_shared_from_this<Comm> {
  public:
    template <typename Executor>
    explicit Comm(Executor ex, tcp::endpoint ep, // ex assumed to be strand
                  duration max_idle)
        : _ep(ep)
        , _max_idle(max_idle)
        , _socket{ex}
        , _timer{_socket.get_executor()}
    {
    }

    ~Comm() { std::cerr << "Comm closed (" << _ep << ")\n"; }

    void Start() {
        post(_socket.get_executor(), [this, self = shared_from_this()] {
            _socket.async_connect(
                _ep, [this, self = shared_from_this()](error_code ec) {
                    std::cerr << "Connect: " << ec.message() << std::endl;
                    if (!ec)
                        DoIdle();
                    else
                        _timer.cancel();
                });
            DoIdle();
        });
    }

    void Stop() {
        post(_socket.get_executor(), [this, self = shared_from_this()] {
            if (not _outbox.empty())
                std::cerr << "Warning: some messages may be undelivered ("
                          << _ep << ")" << std::endl;
            _socket.cancel();
            _timer.cancel();
        });
    }

    void TransmitData(std::string_view msg) {
        post(_socket.get_executor(),
             [this, self = shared_from_this(), msg = std::string(msg.substr(0, MAX_BUFFER_LENGTH))] {
                 _outbox.emplace_back(std::move(msg));

                 if (_outbox.size() == 1) { // no send loop already active?
                     DoSendLoop();
                 }
             });
    }

  private:
    // The DoXXXX functions are assumed to be on the strand
    void DoSendLoop() {
        DoIdle(); // restart max_idle even after last successful send
        if (_outbox.empty())
            return;

        boost::asio::async_write(
            _socket, boost::asio::buffer(_outbox.front()),
            [this, self = shared_from_this()](error_code ec, size_t xfr) {
                std::cerr << "Write " << xfr << " bytes to " << _ep << " " << ec.message() << std::endl;
                if (!ec) {
                    _outbox.pop_front();
                    DoSendLoop();
                } else
                    _timer.cancel(); // causes Comm shutdown
            });
    }

    void DoIdle() {
        _timer.expires_from_now(_max_idle); // cancels any pending wait
        _timer.async_wait([this, self = shared_from_this()](error_code ec) {
            if (!ec) {
                std::cerr << "Timeout" << std::endl;
                _socket.cancel();
            }
        });
    }

    tcp::endpoint                      _ep;
    duration                           _max_idle;
    tcp::socket                        _socket;
    boost::asio::high_resolution_timer _timer;
    std::deque<std::string>            _outbox;
};

class TCPClient {
    boost::asio::any_io_executor _ex;
    std::deque<Handle>           _comms;

  public:
    TCPClient(boost::asio::any_io_executor ex) : _ex(ex) {}

    void Add(tcp::endpoint ep, duration max_idle = 3s)
    {
        auto pcomm = std::make_shared<Comm>(make_strand(_ex), ep, max_idle);
        pcomm->Start();
        _comms.push_back(pcomm);

        // optionally garbage collect expired handles:
        std::erase_if(_comms, std::mem_fn(&Handle::expired));
    }

    void TransmitData(std::string_view msg) {
        for (auto& handle : _comms)
            if (auto pcomm = handle.lock())
                pcomm->TransmitData(msg);
    }

    void Stop() {
        for (auto& handle : _comms)
            if (auto pcomm = handle.lock())
                pcomm->Stop();
    }
};

int main() {
    using std::this_thread::sleep_for;

    boost::asio::thread_pool ctx(1);
    TCPClient                c(ctx.get_executor());

    c.Add({{}, 8989});
    c.Add({{}, 8990}, 1s); // shorter timeout for demo

    c.TransmitData("Hello world\n");

    c.Add({{}, 8991});

    sleep_for(2s); // times out second connection

    c.TransmitData("Three is a crowd\n"); // only delivered to 8989 and 8991

    sleep_for(1s); // allow for delivery

    c.Stop();
    ctx.join();
}

Prints (on Coliru):

for p in {8989..8991}; do netcat -t -l -p $p& done
sleep .5; ./a.out
Hello world
Connect: Success
Connect: Success
Hello world
Connect: Success
Write 12 bytes to 0.0.0.0:8989 Success
Write 12 bytes to 0.0.0.0:8990 Success
Timeout
Comm closed (0.0.0.0:8990)
Write Three is a crowd
17Three is a crowd
 bytes to 0.0.0.0:8989 Success
Write 17 bytes to 0.0.0.0:8991 Success
Comm closed (0.0.0.0:8989)
Comm closed (0.0.0.0:8991)

The output there is a little out of order. Local live demo: