async_write有兩個對外接口:
template <typename AsyncWriteStream, typename Allocator, typename WriteHandler>
inline BOOST_ASIO_INITFN_RESULT_TYPE(WriteHandler,
    void (boost::system::error_code, std::size_t)) // per the original note, this macro yields void here
async_write(AsyncWriteStream& s,
    boost::asio::basic_streambuf<Allocator>& b,
    WriteHandler&& handler)
{
  // Wrap the streambuf in a dynamic-buffer adapter and forward to the
  // DynamicBuffer overload. The handler must be moved with a static_cast to
  // rvalue reference (what BOOST_ASIO_MOVE_CAST expands to);
  // `WriteHandler&& (handler)` is not valid C++ syntax.
  return async_write(s, basic_streambuf_ref<Allocator>(b),
      static_cast<WriteHandler&&>(handler));
}
template <typename AsyncWriteStream, typename Allocator,
    typename CompletionCondition, typename WriteHandler>
inline BOOST_ASIO_INITFN_RESULT_TYPE(WriteHandler,
    void (boost::system::error_code, std::size_t)) // likewise void (per original note)
async_write(AsyncWriteStream& s,
    boost::asio::basic_streambuf<Allocator>& b,
    CompletionCondition completion_condition,
    WriteHandler&& handler)
{
  // Same as the overload above but with an explicit completion condition.
  // The cast must be static_cast to an rvalue reference;
  // `WriteHandler&& (handler)` is not valid C++ syntax.
  return async_write(s, basic_streambuf_ref<Allocator>(b),
      completion_condition, static_cast<WriteHandler&&>(handler));
}
分別是指定寫入的completion_condition與不指定的版本,這2個函數會調用另外2種重載版本的async_write。對於未指定completion_condition的版本,會傳入一個類型爲transfer_all_t的僞函數(functor):
class transfer_all_t
{
public:
  typedef std::size_t result_type;

  // Completion condition used when the caller supplies none.
  // On error: allow 0 more bytes (stop). Otherwise: allow another
  // default_max_transfer_size bytes, i.e. keep transferring until done.
  template <typename Error>
  std::size_t operator()(const Error& err, std::size_t)
  {
    if (!!err)
      return 0;
    return default_max_transfer_size;
  }
};
簡而言之,這個默認completion_condition的意思就是:在不報錯的情況下,每次都允許繼續傳輸(最多default_max_transfer_size字節),直到數據全部寫完爲止。
總之不管是否傳入completion_condition,最終都會調用到下面這個函數:
template <typename AsyncWriteStream, typename DynamicBuffer,
    typename CompletionCondition, typename WriteHandler>
void async_write(AsyncWriteStream& s,
    DynamicBuffer&& buffers,
    CompletionCondition completion_condition,
    WriteHandler&& handler,
    typename enable_if<
      is_dynamic_buffer<DynamicBuffer>::value
    >::type*)
{
  // Validate that the supplied handler matches void(error_code, size_t).
  BOOST_ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;

  async_completion<WriteHandler,
      void (boost::system::error_code, std::size_t)> init(handler);

  // write_dynbuf_op is a function object: construct a temporary and invoke
  // its operator() immediately with start == 1 to kick off the write.
  // Note: the buffer must be forwarded with static_cast to an rvalue
  // reference; `DynamicBuffer&& (buffers)` is not valid C++ syntax.
  detail::write_dynbuf_op<AsyncWriteStream,
      typename decay<DynamicBuffer>::type,
      CompletionCondition, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void (boost::system::error_code, std::size_t))>(
          s, static_cast<DynamicBuffer&&>(buffers), // constructor arguments follow
          completion_condition, init.completion_handler)(
            boost::system::error_code(), 0, 1);

  // For plain callback handlers, get() is empty and returns void.
  return init.result.get();
}
下面看write_dynbuf_op源碼,這裏略去了一堆邏輯簡單但很長的構造函數,就是單純的成員變量賦值:
template <typename AsyncWriteStream, typename DynamicBuffer,
    typename CompletionCondition, typename WriteHandler>
class write_dynbuf_op
{
public:
  // Constructors omitted: they simply copy/move the arguments into the
  // members declared below.

  // Invoked once with start == 1 to begin, then re-entered (start defaults
  // to 0) as the completion callback of the inner async_write.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    switch (start)
    {
    case 1:
      // Kick off the composed write over the buffer sequence, passing *this
      // (moved) as its completion handler. The move must be a static_cast to
      // rvalue reference; `write_dynbuf_op&& (*this)` is not valid C++.
      async_write(stream_, buffers_.data(), completion_condition_,
          static_cast<write_dynbuf_op&&>(*this));
      return; default:
      // The inner write finished: remove the written bytes from the dynamic
      // buffer and invoke the user's handler.
      buffers_.consume(bytes_transferred);
      handler_(ec, static_cast<const std::size_t&>(bytes_transferred));
    }
  }

  // Template-typed members; the names match the construction site above.
  AsyncWriteStream& stream_;
  DynamicBuffer buffers_;
  CompletionCondition completion_condition_;
  WriteHandler handler_;
};
這一段代碼的邏輯比較關鍵,剛看到可能會很困惑,所以這裏先直接劇透這一段代碼的最終邏輯再看爲什麼是這樣的邏輯。
最初調用這個函數會直接調用async_write的另一個重載版本並return。但實際邏輯並不是這樣就結束了,注意這裏調用async_write的時候還傳入了*this(也就是write_dynbuf_op自身)作爲那次async_write的WriteHandler,也就是在寫完時調用的回調函數。而被調用的那個async_write內部(通過後面會講的write_op)實際上是循環調用最初傳進來的AsyncWriteStream(這裏採用模板名,方便對應)的async_write_some,因爲async_write_some是不保證一次性能把數據全部寫完的。等內部的async_write徹底完成後回調write_dynbuf_op時,start使用默認值0,於是走default分支:先consume掉已寫入的bytes_transferred字節,再調用最初傳入的WriteHandler回調函數,到此一個async_write就執行完了。(實際上異步IO大體邏輯這裏就總結完了,對細節感興趣的可以接着往下看)
接下來先看要調用async_write_some的那個async_write重載版本:
template <typename AsyncWriteStream, typename ConstBufferSequence,
typename CompletionCondition, typename WriteHandler>
void async_write(AsyncWriteStream& s, const ConstBufferSequence& buffers,
CompletionCondition completion_condition,
BOOST_ASIO_MOVE_ARG(WriteHandler) handler,
typename enable_if<
is_const_buffer_sequence<ConstBufferSequence>::value
>::type*)
{
// 回調函數類型檢查
BOOST_ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check;
async_completion<WriteHandler,
void (boost::system::error_code, std::size_t)> init(handler);
detail::start_write_buffer_sequence_op(s, buffers,
boost::asio::buffer_sequence_begin(buffers), completion_condition,
init.completion_handler);
return init.result.get(); // void
}
template <typename AsyncWriteStream, typename ConstBufferSequence,
typename ConstBufferIterator, typename CompletionCondition,
typename WriteHandler>
inline void start_write_buffer_sequence_op(AsyncWriteStream& stream,
const ConstBufferSequence& buffers, const ConstBufferIterator&,
CompletionCondition completion_condition, WriteHandler& handler)
{
detail::write_op<AsyncWriteStream, ConstBufferSequence, // write_op是一個僞函數
ConstBufferIterator, CompletionCondition, WriteHandler>(
stream, buffers, completion_condition, handler)(
boost::system::error_code(), 0, 1);
}
這裏提一下上面所有async_write的重載版本,除了最初的對外接口,剩下的都是根據enable_if來區分的,這個屬於模板元編程的範疇,這裏就不展開講了,感興趣的可以查查SFINAE(匹配失敗不是錯誤)。
可以看到上面最後還是調用到了write_op,這也是個僞函數,結構很像前面的write_dynbuf_op,下面看它的源碼(及它的基類),同樣省略一坨構造函數:
// Stores the user-supplied completion condition and exposes one helper to
// invoke it.
template <typename CompletionCondition>
class base_from_completion_cond
{
protected:
explicit base_from_completion_cond(CompletionCondition completion_condition)
: completion_condition_(completion_condition) {}
// Returns how many more bytes the condition allows to be transferred
// (0 means the operation is considered complete; see write_op's loop).
std::size_t check_for_completion(
const boost::system::error_code& ec,
std::size_t total_transferred)
{
return detail::adapt_completion_condition_result( // adapt_completion_condition_result is an identity adapter here
completion_condition_(ec, total_transferred)); // invoke the condition supplied by the derived class
}
private:
CompletionCondition completion_condition_;
};
template <typename AsyncWriteStream, typename ConstBufferSequence,
    typename ConstBufferIterator, typename CompletionCondition,
    typename WriteHandler>
class write_op
  : detail::base_from_completion_cond<CompletionCondition>
{
public:
  // Constructors omitted: they copy/move the arguments into the members.

  // State machine: first invoked with start == 1; thereafter re-entered as
  // the async_write_some completion callback with start == 0, which jumps
  // into the `default:` label inside the do/while.
  void operator()(const boost::system::error_code& ec,
      std::size_t bytes_transferred, int start = 0)
  {
    std::size_t max_size;
    switch (start_ = start)
    {
    case 1:
      max_size = this->check_for_completion(ec, buffers_.total_consumed());
      do
      {
        // Issue one partial write, with *this (moved) as the handler — the
        // same self-recursive pattern as write_dynbuf_op. The move must be
        // a static_cast to rvalue reference; `write_op&& (*this)` is not
        // valid C++ syntax.
        stream_.async_write_some(buffers_.prepare(max_size),
            static_cast<write_op&&>(*this));
        return; default:
        // Drop the bytes just written from the front of the buffers.
        buffers_.consume(bytes_transferred);
        // Stop on an error-free zero-byte write or once all data is written.
        if ((!ec && bytes_transferred == 0) || buffers_.empty())
          break;
        max_size = this->check_for_completion(ec, buffers_.total_consumed());
      } while (max_size > 0);
      // Finished (or aborted): invoke the user's handler with the totals.
      handler_(ec, buffers_.total_consumed());
    }
  }

  // Members correspond by name to the construction site.
  AsyncWriteStream& stream_;
  boost::asio::detail::consuming_buffers<const_buffer,
      ConstBufferSequence, ConstBufferIterator> buffers_;
  int start_;
  WriteHandler handler_;
};
基類功能很簡單,僅僅提供一個調用傳入的completion_condition的接口。write_op中才真正調用到了async_write_some。
async_write_some是basic_stream_socket的一個成員函數,熟悉ASIO套路的都知道,這個函數肯定會調用某個服務類的具體邏輯函數。猜得沒錯,它會調用reactive_socket_service_base的async_send函數:
// Start an asynchronous send. The data being sent must be valid for the
// lifetime of the asynchronous operation.
template <typename ConstBufferSequence, typename Handler>
void async_send(base_implementation_type& impl, // impl: the per-socket data of the owning basic_stream_socket
const ConstBufferSequence& buffers,
socket_base::message_flags flags, Handler& handler)
{
bool is_continuation = boost_asio_handler_cont_helpers::is_continuation(handler); // hard-coded to return false (per original note)
// Allocate and construct an operation to wrap the handler.
typedef reactive_socket_send_op<ConstBufferSequence, Handler> op;
typename op::ptr p = { boost::asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(impl.socket_, impl.state_, buffers, flags, handler); // placement new: wrap the handler inside the op
start_op(impl, reactor::write_op, p.p, is_continuation, true,
((impl.state_ & socket_ops::stream_oriented)
&& buffer_sequence_adapter<boost::asio::const_buffer,
ConstBufferSequence>::all_empty(buffers))); // last argument: bool — whether all buffers are empty
p.v = p.p = 0; // zero the pointers — presumably so ptr cleanup does not free the live op; see do_complete
}
大家可能會對async_send的第一個參數impl有點疑問,每個basic_stream_socket都有它自己的base_implementation_type,僅含該套接字的一些數據,它大概長這樣:
// The implementation type of the socket: the per-socket state that each
// basic_stream_socket carries and passes into the service functions.
struct base_implementation_type
{
// The native socket representation.
socket_type socket_;
// The current state of the socket.
socket_ops::state_type state_;
// Per-descriptor data used by the reactor.
reactor::per_descriptor_data reactor_data_;
};
再回到async_send函數上,它最後調用了start_op,傳入reactor::write_op(一個枚舉值)代表當前操作是一個寫操作(可回顧我前面的博客)。
接下來看start_op函數:
void reactive_socket_service_base::start_op(
    reactive_socket_service_base::base_implementation_type& impl,
    int op_type, reactor_op* op, bool is_continuation, // op_type is write_op here; op carries the I/O logic and state
    bool is_non_blocking, bool noop) // is_non_blocking is true; noop means the buffers are empty
{
  // Buffers non-empty and the socket already is (or can be put into)
  // non-blocking mode: let the reactor (an epoll_reactor member) drive it.
  // Short-circuit keeps set_internal_non_blocking from running when noop.
  if (!noop && ((impl.state_ & socket_ops::non_blocking)
        || socket_ops::set_internal_non_blocking(
            impl.socket_, impl.state_, true, op->ec_)))
  {
    reactor_.start_op(op_type, impl.socket_,
        impl.reactor_data_, op, is_continuation, is_non_blocking);
    return;
  }

  // Buffers empty (nothing to write) or the socket could not be made
  // non-blocking: hand the op straight to the scheduler so it invokes the
  // completion callback.
  reactor_.post_immediate_completion(op, is_continuation);
}
關於post_immediate_completion的邏輯前面的博客已經講過了,這裏就不再重複了。接下來看epoll_reactor::start_op中是如何寫數據的。(最好結合這篇博客來理解)
epoll_reactor::start_op的邏輯有點複雜(甚至有些地方我自己都沒看懂。。。),故這裏只截取在我們這篇文章所說的情況中將執行的邏輯代碼:
// NOTE(review): this is an excerpt — descriptor_lock and several branches
// are declared in the omitted portions, so the fragment is not
// self-contained.
void epoll_reactor::start_op(int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative) // on this path: is_continuation == false, allow_speculative == true
{
// Two if-blocks that are never reached on this path are omitted...
if (descriptor_data->op_queue_[op_type].empty())
{
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
// Try the I/O immediately ("speculatively") instead of waiting for epoll.
if (descriptor_data->try_speculative_[op_type])
{
if (reactor_op::status status = op->perform())
{
if (status == reactor_op::done_and_exhausted)
if (descriptor_data->registered_events_ != 0)
descriptor_data->try_speculative_[op_type] = false;
descriptor_lock.unlock();
// The op completed right here: hand it to the scheduler so that it
// runs the completion callback.
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
// One if-block that is never reached on this path is omitted...
if (op_type == write_op)
{
// Register interest in EPOLLOUT if it is not registered yet.
if ((descriptor_data->registered_events_ & EPOLLOUT) == 0)
{
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_ | EPOLLOUT;
ev.data.ptr = descriptor_data;
if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0)
{
descriptor_data->registered_events_ |= ev.events;
}
else
{
// epoll_ctl failed: record errno on the op and let the scheduler
// invoke the completion callback with the error.
op->ec_ = boost::system::error_code(errno,
boost::asio::error::get_system_category());
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
}
}
else
{
if (op_type == write_op)
{
descriptor_data->registered_events_ |= EPOLLOUT;
}
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_;
ev.data.ptr = descriptor_data;
epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
}
}
// Queue the op; it runs when epoll reports the descriptor ready (writable).
descriptor_data->op_queue_[op_type].push(op);
scheduler_.work_started();
}
總結來說,就是交給scheduler,等到套接字變爲可寫之後再執行寫入操作。這裏面的邏輯我前面的博客也有講,這裏就不展開討論了。
這裏調用的流程都講完了,那麼真正的寫數據邏輯在哪呢。這時候再回到前面跳過的reactive_socket_service_base::async_send中的reactive_socket_send_op,這裏把那個關鍵片段截出來:
// (Fragment repeated from reactive_socket_service_base::async_send above.)
// Allocate and construct an operation to wrap the handler.
typedef reactive_socket_send_op<ConstBufferSequence, Handler> op;
typename op::ptr p = { boost::asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
// Placement new into the storage obtained from op::ptr::allocate(handler);
// the op wraps the user's completion handler.
p.p = new (p.v) op(impl.socket_, impl.state_, buffers, flags, handler);
當初只說這個op是回調函數的包裝類,實際上沒這麼簡單,這裏面還有數據寫入操作。reactive_socket_send_op(經由reactive_socket_send_op_base)間接繼承自reactor_op,代表一個(實際不止一個)由epoll_reactor觸發的操作。
template <typename ConstBufferSequence, typename Handler>
class reactive_socket_send_op :
public reactive_socket_send_op_base<ConstBufferSequence>
{
public:
BOOST_ASIO_DEFINE_HANDLER_PTR(reactive_socket_send_op); // this macro contains the definition of ptr
// Store the send parameters in the base, registering do_complete as the
// completion function, and take ownership of the user's handler.
reactive_socket_send_op(socket_type socket,
socket_ops::state_type state, const ConstBufferSequence& buffers,
socket_base::message_flags flags, Handler& handler)
: reactive_socket_send_op_base<ConstBufferSequence>(socket,
state, buffers, flags, &reactive_socket_send_op::do_complete),
handler_(BOOST_ASIO_MOVE_CAST(Handler)(handler))
{
handler_work<Handler>::start(handler_); // effectively a no-op: start() has an empty body (per original note)
}
// Called by the scheduler from its run loop, after the data has already
// been written, to invoke the user's completion handler.
static void do_complete(void* owner, operation* base,
const boost::system::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
reactive_socket_send_op* o(static_cast<reactive_socket_send_op*>(base));
ptr p = { boost::asio::detail::addressof(o->handler_), o, o };
handler_work<Handler> w(o->handler_);
// Make a copy of the handler so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the handler may be the true owner of the memory associated
// with the handler. Consequently, a local copy of the handler is required
// to ensure that any owning sub-object remains valid until after we have
// deallocated the memory here.
detail::binder2<Handler, boost::system::error_code, std::size_t>
handler(o->handler_, o->ec_, o->bytes_transferred_);
p.h = boost::asio::detail::addressof(handler.handler_); // addressof simply takes the address
p.reset(); // destroys and deallocates the op (and the handler copy it held)
// Make the upcall if required.
if (owner)
{
fenced_block b(fenced_block::half); // effectively does nothing here (per original note)
w.complete(handler, handler.handler_); // finally invoke the handler the user originally passed in
}
}
private:
Handler handler_;
};
這個do_complete是一個接口函數,會由scheduler在它的run函數中調用。當然,調用的時候已經寫完數據了。到這裏還是沒看到寫數據邏輯,實際上這段邏輯在reactive_socket_send_op的基類reactive_socket_send_op_base中:
template <typename ConstBufferSequence>
class reactive_socket_send_op_base : public reactor_op
{
public:
  // Capture everything needed to perform the send later. do_perform is
  // registered with reactor_op as the I/O function and complete_func as the
  // completion callback.
  reactive_socket_send_op_base(socket_type socket,
      socket_ops::state_type state, const ConstBufferSequence& buffers,
      socket_base::message_flags flags, func_type complete_func)
    : reactor_op(&reactive_socket_send_op_base::do_perform, complete_func),
      socket_(socket),
      state_(state),
      buffers_(buffers),
      flags_(flags)
  {
  }

  // Invoked (by the reactor, not the scheduler) when it is time to actually
  // write the data.
  static status do_perform(reactor_op* base)
  {
    reactive_socket_send_op_base* self(
        static_cast<reactive_socket_send_op_base*>(base));

    buffer_sequence_adapter<boost::asio::const_buffer,
        ConstBufferSequence> bufs(self->buffers_);

    // non_blocking_send performs the actual send (per the original note,
    // via the native ::sendmsg).
    status result = socket_ops::non_blocking_send(self->socket_,
        bufs.buffers(), bufs.count(), self->flags_,
        self->ec_, self->bytes_transferred_) ? done : not_done;

    // A short write on a stream-oriented socket is reported as
    // done_and_exhausted rather than plain done.
    if (result == done
        && (self->state_ & socket_ops::stream_oriented) != 0
        && self->bytes_transferred_ < bufs.total_size())
      result = done_and_exhausted;

    return result;
  }

private:
  socket_type socket_;
  socket_ops::state_type state_;
  ConstBufferSequence buffers_;
  socket_base::message_flags flags_;
};
而這個do_perform函數不是由scheduler來調用的,是由觸發器(也就是那個epoll_reactor)調用的。epoll_reactor負責監聽所有可能會觸發這些op執行的條件,包括定時器到點以及描述符可讀可寫變化,當條件符合的時候便會調用perform_io,調用完後再把這個op交給scheduler並讓它去調用do_complete,也就是完成後的回調函數。