事件驅動函數
事件驅動是nginx設計的核心,linux平臺下,nginx會優先使用epoll進行事件處理。main—>ngx_master_process_cycle—>ngx_start_worker_processes—>ngx_worker_process_cycle—>ngx_worker_process_init,ngx_process_events_and_timers。master進程中創建worker進程,之後在ngx_worker_process_cycle函數中初始化,並打開socketpair,加入epoll中。隨後進入ngx_process_events_and_timers函數,等待http連接請求。nginx作爲web服務器,性能要求很高,對最耗時的IO進行異步處理。註冊到epoll上,作爲一個個事件待觸發。定義的事件處理函數如下:
/*
 * epoll implementation of nginx's event-module interface: the module name,
 * two configuration hooks, and the actions table that maps the generic
 * event operations onto their epoll-specific implementations.
 */
ngx_event_module_t ngx_epoll_module_ctx = {
&epoll_name,
ngx_epoll_create_conf, /* create configuration */
ngx_epoll_init_conf, /* init configuration */
{
ngx_epoll_add_event, /* add an event */
ngx_epoll_del_event, /* delete an event */
ngx_epoll_add_event, /* enable an event (same as add for epoll) */
ngx_epoll_del_event, /* disable an event (same as delete for epoll) */
ngx_epoll_add_connection, /* add a connection */
ngx_epoll_del_connection, /* delete a connection */
NULL, /* process the changes (unused by epoll) */
ngx_epoll_process_events, /* process the events */
ngx_epoll_init, /* init the events */
ngx_epoll_done, /* done the events */
}
};
等待連接請求
worker進程啓動後調用ngx_worker_process_init,初始化連接,監聽80端口,之後不斷調用ngx_process_events_and_timers 函數,等待http連接請求。
ngx_process_events_and_timers
if (ngx_use_accept_mutex) {
if (ngx_accept_disabled > 0) {
ngx_accept_disabled--;
// With multiple workers, a shared-memory mutex lets only one process
// accept connections at a time, avoiding the thundering-herd problem.
} else {
// After winning the mutex, the listening socket's events (HTTP default
// port 80) are registered with epoll, so client connects trigger IO events.
if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
return;
}
// In multi-process mode, NGX_POST_EVENTS makes triggered events go onto
// a posted queue instead of having their IO callbacks run inline.
if (ngx_accept_mutex_held) {
flags |= NGX_POST_EVENTS;
} else {
if (timer == NGX_TIMER_INFINITE
|| timer > ngx_accept_mutex_delay)
{
timer = ngx_accept_mutex_delay;
}
}
}
}
// Resolves to ngx_epoll_process_events, which calls epoll_wait; when events
// fire, their IO callbacks run directly or the events are queued on a list.
(void) ngx_process_events(cycle, timer, flags);
// Walk the posted accept events and run each one's callback.
if (ngx_posted_accept_events) {
ngx_event_process_posted(cycle, &ngx_posted_accept_events);
}
// Release the shared accept mutex.
if (ngx_accept_mutex_held) {
ngx_shmtx_unlock(&ngx_accept_mutex);
}
// Handle expired timer events.
if (delta) {
ngx_event_expire_timers();
}
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"posted events %p", ngx_posted_events);
// Run the callbacks of the remaining posted IO events.
if (ngx_posted_events) {
if (ngx_threaded) {
ngx_wakeup_worker_thread(cycle);
} else {
ngx_event_process_posted(cycle, &ngx_posted_events);
}
}
之所以要在多進程時,將觸發的IO事件先添加鏈表,而不是立馬執行回調。是因爲ngx_trylock_accept_mutex函數會搶佔鎖,這樣其他進程就必須等待,直到鎖被釋放纔可以繼續接受事件觸發。而一般的IO回調函數都比較耗時,一旦多個事件同時觸發,等待週期會比較長。
(void) ngx_process_events(cycle, timer, flags)
events = epoll_wait(ep, event_list, (int) nevents, timer);
for (i = 0; i < events; i++) {
c = event_list[i].data.ptr;
// The lowest bit of the stored pointer carries the "instance" flag,
// used to detect stale events; mask it off to recover the connection.
instance = (uintptr_t) c & 1;
c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);
rev = c->read;
// In multi-process mode the handler is not invoked here; the event is
// appended to a posted list (accept events and normal events separately).
if (flags & NGX_POST_EVENTS) {
queue = (ngx_event_t **) (rev->accept ?&ngx_posted_accept_events : &ngx_posted_events);
ngx_locked_post_event(rev, queue);
} else {
rev->handler(rev);
}
}
連接建立
當Http客戶端發來連接請求後,觸發讀事件的Handler函數,在初始化時,設定這個函數爲ngx_event_accept。這個函數裏調用accept,建立TCP通信。
ngx_event_accept
lc = ev->data;
ls = lc->listening;
ev->ready = 0;
do {
socklen = NGX_SOCKADDRLEN;
// accept() the next pending connection on the listening socket
s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
// ngx_accept_disabled becomes positive once the worker's free connections
// drop below one eighth of the total; while positive, the worker skips
// accepting new connection requests (see ngx_process_events_and_timers).
ngx_accept_disabled = ngx_cycle->connection_n / 8
- ngx_cycle->free_connection_n;
// Grab a free connection object for the new socket and initialize it.
c = ngx_get_connection(s, ev->log);
c->pool = ngx_create_pool(ls->pool_size, ev->log);
if (c->pool == NULL) {
ngx_close_accepted_connection(c);
return;
}
c->sockaddr = ngx_palloc(c->pool, socklen);
if (c->sockaddr == NULL) {
ngx_close_accepted_connection(c);
return;
}
ngx_memcpy(c->sockaddr, sa, socklen);
log = ngx_palloc(c->pool, sizeof(ngx_log_t));
if (log == NULL) {
ngx_close_accepted_connection(c);
return;
}
*log = ls->log;
// recv/send functions this HTTP connection will use
c->recv = ngx_recv;
c->send = ngx_send;
c->recv_chain = ngx_recv_chain;
c->send_chain = ngx_send_chain;
c->log = log;
c->pool->log = log;
c->socklen = socklen;
c->listening = ls;
c->local_sockaddr = ls->sockaddr;
c->local_socklen = ls->socklen;
c->unexpected_eof = 1;
}
rev = c->read;
wev = c->write;
wev->ready = 1;
if (ev->deferred_accept) {
rev->ready = 1;
}
rev->log = log;
wev->log = log;
/*
* TODO: MT: - ngx_atomic_fetch_add()
* or protection by critical section or light mutex
*
* TODO: MP: - allocated in a shared memory
* - ngx_atomic_fetch_add()
* or protection by critical section or light mutex
*/
// Bump nginx's global connection counter, a shared-memory variable.
c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);
// Register the connection with the event module. The condition excludes
// epoll (NGX_USE_EPOLL_EVENT), which adds the connection later instead.
if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
if (ngx_add_conn(c) == NGX_ERROR) {
ngx_close_accepted_connection(c);
return;
}
}
log->data = NULL;
log->handler = NULL;
// Invoke the listening socket's handler; for HTTP this is
// ngx_http_init_connection.
ls->handler(c);
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
ev->available--;
}
} while (ev->available);
}
listener的回調函數是http模塊在初始化的時候被賦值的。
/*
 * Directive table for the http core module: the single "http" block
 * directive, whose setup handler ngx_http_block drives the entire HTTP
 * module initialization (including listener creation).
 */
static ngx_command_t ngx_http_commands[] = {
{ ngx_string("http"),
NGX_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS,
ngx_http_block,
0,
0,
NULL },
ngx_null_command
};
在Http模塊被加載的時候,會調用模塊初始化函數,ngx_http_block—>ngx_http_optimize_servers—>ngx_http_init_listening—>ngx_http_add_listening。在ngx_http_add_listening中創建一個監聽結構體ngx_listening_t,並初始化,設置回調句柄ls->handler = ngx_http_init_connection。
ngx_http_init_connection函數中又進一步設定connection讀事件的回調函數爲ngx_http_wait_request_handler,寫事件的回調函數是ngx_http_empty_handler。並將讀事件加入epoll監聽隊列,等待客戶端的http請求報文。
處理請求
ngx_http_wait_request_handler函數分配內存,然後接收報文,並調用ngx_http_process_request_line解析報文的每一行信息,進行Http處理的每個環節。ngx_http_process_request_line—>ngx_http_handler—>ngx_http_core_run_phases。然後執行各個phases函數。
/*
 * Read-event handler installed by ngx_http_init_connection: waits for the
 * first bytes of a client request, allocates (or re-arms) the header buffer,
 * reads whatever the kernel has, creates the request object, and hands off
 * to ngx_http_process_request_line.
 */
static void
ngx_http_wait_request_handler(ngx_event_t *rev)
{
size_t size;
ssize_t n;
ngx_buf_t *b;
ngx_connection_t *c;
ngx_http_connection_t *hc;
ngx_http_core_srv_conf_t *cscf;
c = rev->data;
// The connection timed out before a request arrived; close it.
if (rev->timedout) {
ngx_log_error(NGX_LOG_INFO, c->log, NGX_ETIMEDOUT, "client timed out");
ngx_http_close_connection(c);
return;
}
if (c->close) {
ngx_http_close_connection(c);
return;
}
hc = c->data;
cscf = ngx_http_get_module_srv_conf(hc->conf_ctx, ngx_http_core_module);
size = cscf->client_header_buffer_size;
b = c->buffer;
// Allocate a buffer to receive the HTTP request header; if the buffer
// object exists but its storage was released, re-allocate just the storage.
if (b == NULL) {
b = ngx_create_temp_buf(c->pool, size);
if (b == NULL) {
ngx_http_close_connection(c);
return;
}
c->buffer = b;
} else if (b->start == NULL) {
b->start = ngx_palloc(c->pool, size);
if (b->start == NULL) {
ngx_http_close_connection(c);
return;
}
b->pos = b->start;
b->last = b->start;
b->end = b->last + size;
}
// Read the HTTP request bytes from the socket's receive buffer.
n = c->recv(c, b->last, size);
// NOTE(review): upstream nginx also handles n == NGX_AGAIN here (re-arms
// the read event and may release the buffer); this excerpt omits that path
// — verify against the full source before reusing.
if (n == NGX_ERROR) {
ngx_http_close_connection(c);
return;
}
// recv returning 0 means the peer performed an orderly shutdown.
if (n == 0) {
ngx_log_error(NGX_LOG_INFO, c->log, 0,
"client closed connection");
ngx_http_close_connection(c);
return;
}
b->last += n;
c->log->action = "reading client request line";
ngx_reusable_connection(c, 0);
c->data = ngx_http_create_request(c);
if (c->data == NULL) {
ngx_http_close_connection(c);
return;
}
// Switch the read handler to ngx_http_process_request_line: a request
// line can be longer than what one read returned (it may exceed the TCP
// receive buffer), so the read event may fire several more times before
// the line is complete.
rev->handler = ngx_http_process_request_line;
ngx_http_process_request_line(rev);
}
很多基於nginx的二次開發都是通過在Http報文里加入一些屬性,然後開發對應的phase方法,將phase方法加入ngx_http_core_commands中。
/*
 * Excerpt (truncated) of the http core module's directive table; custom
 * phase handlers from third-party modules register their directives in
 * tables of this same shape.
 */
static ngx_command_t ngx_http_core_commands[] = {
{ ngx_string("variables_hash_max_size"),
NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1,
ngx_conf_set_num_slot,
NGX_HTTP_MAIN_CONF_OFFSET,
offsetof(ngx_http_core_main_conf_t, variables_hash_max_size),
NULL },
{ ngx_string("variables_hash_bucket_size"),
NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1,
ngx_conf_set_num_slot,
NGX_HTTP_MAIN_CONF_OFFSET,
offsetof(ngx_http_core_main_conf_t, variables_hash_bucket_size),
NULL },
//... ...
連接關閉
客戶端關閉http連接,調用ngx_http_finalize_request—>ngx_http_finalize_connection—>ngx_http_close_request—>ngx_http_free_request,ngx_http_close_connection。釋放內存,關閉套接字,銷燬內存池。