传出缓冲区需要是类成员(就像 data_ 一样),这样才能保证它的生存期一直持续到 async_write 完成为止。
您还可以通过 ASAN/UBSAN 或 Valgrind 等 linter/运行时检查来发现此类问题。
UPDATE
Also
size_t reply_length =
boost::asio::read(s, boost::asio::buffer(reply, request_length));
错误地使用了 request_length。一般来说,请避免手动指定缓冲区大小。
此外,您的协议不提供帧,因此您实际上无法为较新的请求保持相同的连接打开(您不知道完整的响应需要多少字节)。我将通过在第一个请求后关闭连接来“修复”它,这样我们就有了一个有效的演示。
continue_ 标志还存在一个竞争条件,不过我把它留给读者作为练习。
当然,要考虑不要泄漏请求类实例。
哦,我还改用了 Boost JSON,因为它似乎更适合:
在 Coliru 上在线运行:http://coliru.stacked-crooked.com/a/32b2f448dc94cd22
#include <chrono>
#include <iomanip>
#include <iostream>
#include <thread>

#include <boost/asio.hpp>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
using boost::asio::ip::tcp;
using boost::system::error_code;
namespace json = boost::json;
using Value = json::object;
/// NOTE: This class exists exclusively for unit testing.
/// Demo request handler: applies a binary arithmetic operation between the
/// request's "VALUE" field and the member n_.
/// NOTE: This class exists exclusively for unit testing.
struct Sample {
    int n_;

    Value add_n(Value const& request) const { return impl(std::plus<>{}, request); }
    Value sub_n(Value const& request) const { return impl(std::minus<>{}, request); }
    Value mul_n(Value const& request) const { return impl(std::multiplies<>{}, request); }
    Value div_n(Value const& request) const { return impl(std::divides<>{}, request); }

  private:
    /// Returns {"VALUE": op(request["VALUE"], n_), "SUCCESS": true}, or an
    /// error object when the request has no "VALUE" member.
    template <typename Op> Value impl(Op op, Value const& req) const {
        if (!req.contains("VALUE"))
            return Value{{"ERRORS", "Invalid value."}, {"SUCCESS", false}};
        return Value{{"VALUE", op(req.at("VALUE").as_int64(), n_)},
                     {"SUCCESS", true}};
    }
};
// Handler signature used by the command map; invoked as
// handler(instance, requestObject).
// NOTE(review): takes Sample by value, so each dispatch copies the instance.
using RequestClassMethod =
std::function<Value(Sample, Value const&)>;
/// Owns a single client connection: reads a JSON request, dispatches it to a
/// command handler, writes the JSON reply, and repeats until EOF/error.
/// Keeps itself alive via shared_from_this while async operations are pending.
template <class RequestHandler, class RequestClass>
class RequestContext
    : public std::enable_shared_from_this<
          RequestContext<RequestHandler, RequestClass>> {
  public:
    using CommandMap = std::map<std::string, RequestHandler>;

    /// @param request_class_inst non-owning; must outlive this context.
    RequestContext(tcp::socket socket, CommandMap commands,
                   RequestClass* request_class_inst)
        : socket_(std::move(socket))
        , commands_(std::move(commands))
        , request_class_inst_(request_class_inst)
    {}

    void Run() { DoRead(); }

    /// Stop the read/write loop after the current operation completes.
    /// NOTE(review): continue_ is unsynchronized; call Kill() from the
    /// io_context thread to avoid a data race.
    void Kill() { continue_ = false; }

  private:
    tcp::socket   socket_;
    CommandMap    commands_;
    RequestClass* request_class_inst_; // non-owning
    bool          continue_ = true;
    char          data_[2048]; // inbound buffer; outlives async_read_some
    std::string   resp_;       // outbound buffer; outlives async_write

    void DoRead()
    {
        socket_.async_read_some(
            boost::asio::buffer(data_),
            [this, self = this->shared_from_this()](error_code ec, std::size_t length) {
                if (!ec && continue_) {
                    DoWrite(length);
                }
            });
    }

    void DoWrite(std::size_t length)
    {
        Value json_resp;
        try {
            auto json_req = json::parse({data_, length}).as_object();
            json_resp = ProcessRequest(json_req);
            // Fix: only mark success when the handler did not already set a
            // status; the old unconditional assignment clobbered the
            // SUCCESS=false set for invalid commands/values.
            if (!json_resp.contains("SUCCESS"))
                json_resp["SUCCESS"] = true;
        } catch (std::exception const& ex) {
            // Parse failures and handler exceptions become error replies.
            json_resp = {{"SUCCESS", false}, {"ERRORS", ex.what()}};
        }

        resp_ = json::serialize(json_resp);
        boost::asio::async_write(socket_, boost::asio::buffer(resp_),
            [this, self = this->shared_from_this()](
                error_code ec, size_t /*bytes_xfered*/) {
                if (!ec && continue_)
                    DoRead();
            });
    }

    /// Dispatch to the handler named by request["COMMAND"]; unknown commands
    /// produce an explicit error response.
    Value ProcessRequest(Value const& request) const
    {
        auto command = request.contains("COMMAND")
            ? request.at("COMMAND").as_string() //
            : "";
        std::string cmdstr(command.data(), command.size());

        return commands_.contains(cmdstr) && request_class_inst_
            ? commands_.at(cmdstr)(*request_class_inst_, request)
            : Value{{"SUCCESS", false}, {"ERRORS", "Invalid command."}};
    }
};
/// Accepts TCP connections on the given port and spawns one RequestContext
/// per client.
template <class RequestHandler, class RequestClass>
class Server {
  public:
    using CommandMap = std::map<std::string, RequestHandler>;

    /// @param request_class_inst non-owning; must outlive the server.
    Server(boost::asio::io_context& io_context, uint16_t port,
           const CommandMap& commands, RequestClass* request_class_inst)
        : acceptor_(io_context, {{}, port})
        , commands_(commands)
        , request_class_inst_(request_class_inst)
    {
        DoAccept();
    }

    ~Server() { Kill(); }

    /// Stop re-arming the accept loop.
    /// NOTE(review): continue_ is unsynchronized; call from the io_context
    /// thread to avoid a data race.
    void Kill() { continue_ = false; }

  private:
    tcp::acceptor acceptor_;
    bool          continue_ = true;
    CommandMap    commands_;
    RequestClass* request_class_inst_; // non-owning

    void DoAccept()
    {
        acceptor_.async_accept([this](error_code ec, tcp::socket socket) {
            if (!ec)
                std::make_shared<
                    RequestContext<RequestHandler, RequestClass>>(
                    std::move(socket), commands_, request_class_inst_)
                    ->Run();
            // Fix: honor Kill() — the flag was previously set but never read,
            // so the accept loop could never be stopped.
            if (continue_)
                DoAccept();
        });
    }
};
void RunServer(uint16_t port)
{
boost::asio::io_context io_context;
Server<RequestClassMethod, Sample> s(
io_context, port,
{{"ADD_2", std::mem_fn(&Sample::add_n)},
{"SUB_2", std::mem_fn(&Sample::sub_n)},
{"MUL_2", std::mem_fn(&Sample::mul_n)},
{"DIV_2", std::mem_fn(&Sample::div_n)}},
new Sample{2});
io_context.run();
}
/// Fire-and-forget: launch RunServer on a detached background thread.
void RunServerInBackground(uint16_t port)
{
    std::thread([port] { RunServer(port); }).detach();
}
/// Demo client: sends one MUL_2 request to the background server and prints
/// the reply.
int main() try {
    RunServerInBackground(5000);
    // Fix: portable std::this_thread::sleep_for instead of POSIX ::sleep;
    // crude guard against connecting before the server has bound its port.
    std::this_thread::sleep_for(std::chrono::seconds(1));

    boost::asio::io_context io;
    tcp::socket s(io);
    s.connect({{}, 5000});

    std::string const request = R"({"COMMAND": "MUL_2", "VALUE" : 21})";
    std::cout << "Request: " << std::quoted(request, '\'') << std::endl;
    boost::asio::write(s, boost::asio::buffer(request));
    // Signal end-of-request: the protocol has no framing, so EOF is the only
    // way the peer knows the message is complete.
    s.shutdown(tcp::socket::shutdown_send);

    error_code ec;
    char reply[2048];
    // Read until EOF/error; the returned length is whatever the server sent,
    // so no hand-specified byte count can be wrong here.
    size_t reply_length = boost::asio::read(s, boost::asio::buffer(reply), ec);

    std::cout << "Reply is: "
              << std::quoted(std::string_view(reply, reply_length), '\'')
              << " (" << ec.message() << ")" << std::endl;
} catch (std::exception const& e) {
    std::cerr << "Exception: " << e.what() << "\n";
}
Prints
Request: '{"COMMAND": "MUL_2", "VALUE" : 21}'
Reply is: '{"VALUE":42,"SUCCESS":true}' (End of file)