進程架構
nginx作爲高性能的web服務器,採用的是多進程的方式,由一個master進程和若干個worker進程組成。作爲web服務器,nginx的設計從頭至尾體現兩個字:性能。一般推薦worker進程數和機器的CPU核數相同。這樣可以最大限度提升性能。
nginx的進程間通信主要涉及三個方面:master與操作系統的通信,master和worker進程的通信,worker進程與worker進程的通信。
User向master進程發送信號
nginx幾乎重新定義了所有系統消息的處理函數。用戶可以通過執行“nginx -s reload”的方式,調用kill函數對master進程發送特定信號。master進程的自定義處理函數隨即被觸發。
nginx自定義命令和對應的消息類型,處理函數:
/*
 * Signal dispatch table: each entry maps a signal number to the "SIG*"
 * name used in log messages, the "nginx -s <command>" string that sends
 * it, and the handler invoked when it is received.
 * (Article excerpt: the table is shown truncated, without its remaining
 * entries and closing "};".)
 */
ngx_signal_t signals[] = {
/* "nginx -s reload": re-read the configuration (HUP, per the text below) */
{ ngx_signal_value(NGX_RECONFIGURE_SIGNAL),
"SIG" ngx_value(NGX_RECONFIGURE_SIGNAL),
"reload",
ngx_signal_handler },
/* "nginx -s reopen" (presumably reopens log files -- not shown here) */
{ ngx_signal_value(NGX_REOPEN_SIGNAL),
"SIG" ngx_value(NGX_REOPEN_SIGNAL),
"reopen",
ngx_signal_handler },
/* no command string: stop accepting new connections (see handler below) */
{ ngx_signal_value(NGX_NOACCEPT_SIGNAL),
"SIG" ngx_value(NGX_NOACCEPT_SIGNAL),
"",
ngx_signal_handler },
/* "nginx -s stop": terminate (sets ngx_terminate in the handler below) */
{ ngx_signal_value(NGX_TERMINATE_SIGNAL),
"SIG" ngx_value(NGX_TERMINATE_SIGNAL),
"stop",
ngx_signal_handler },
在調用“nginx -s reload”後,會在main函數中觸發ngx_signal_process,向master發送信號。
/*
 * Entry point (abridged excerpt): installs the signal handlers, then,
 * when invoked as "nginx -s <command>", sends the corresponding signal
 * to the running master process and exits instead of starting a server.
 */
int main()
{
//portions of the code omitted
if (ngx_init_signals(cycle->log) != NGX_OK) {
return 1;
}
//portions of the code omitted
/* "-s <command>" was given: deliver the signal to the master and exit */
if (ngx_signal) {
return ngx_signal_process(cycle, ngx_signal);
}
//portions of the code omitted
}
/*
 * Read the master process PID from the pid file named in the core
 * configuration and forward the command string "sig" to that process
 * via ngx_os_signal_process().
 * Returns 0 on success, 1 on any failure (unopenable/unreadable pid
 * file, or non-numeric content).
 */
ngx_int_t
ngx_signal_process(ngx_cycle_t *cycle, char *sig)
{
ssize_t n;
ngx_int_t pid;
ngx_file_t file;
ngx_core_conf_t *ccf;
u_char buf[NGX_INT64_LEN + 2];   /* room for PID digits plus trailing CR/LF */
ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "signal process started");
ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
ngx_memzero(&file, sizeof(ngx_file_t));
file.name = ccf->pid;
file.log = cycle->log;
/* open the pid file to obtain the master process PID */
file.fd = ngx_open_file(file.name.data, NGX_FILE_RDONLY,
NGX_FILE_OPEN, NGX_FILE_DEFAULT_ACCESS);
if (file.fd == NGX_INVALID_FILE) {
ngx_log_error(NGX_LOG_ERR, cycle->log, ngx_errno,
ngx_open_file_n " \"%s\" failed", file.name.data);
return 1;
}
n = ngx_read_file(&file, buf, NGX_INT64_LEN + 2, 0);
/* close unconditionally; a close failure is logged but not fatal */
if (ngx_close_file(file.fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
ngx_close_file_n " \"%s\" failed", file.name.data);
}
if (n == NGX_ERROR) {
return 1;
}
/* strip trailing CR/LF: step n back while the last byte is a line break */
while (n-- && (buf[n] == CR || buf[n] == LF)) { /* void */ }
/* n now indexes the last digit; ++n turns it into the digit count */
pid = ngx_atoi(buf, ++n);
if (pid == NGX_ERROR) {
ngx_log_error(NGX_LOG_ERR, cycle->log, 0,
"invalid PID number \"%*s\" in \"%s\"",
n, buf, file.name.data);
return 1;
}
/* hand the PID and the command name to the OS-level signal sender */
return ngx_os_signal_process(cycle, sig, pid);
}
/*
 * Walk the global signals[] table looking for the entry whose command
 * name matches "name", and deliver that entry's signal to process
 * "pid" with kill(2).
 * Returns 0 on successful delivery, 1 when no entry matched or every
 * matching kill() failed (failures are logged).
 */
ngx_int_t
ngx_os_signal_process(ngx_cycle_t *cycle, char *name, ngx_int_t pid)
{
    ngx_signal_t  *s;

    s = signals;

    while (s->signo != 0) {

        if (ngx_strcmp(name, s->name) != 0) {
            s++;
            continue;
        }

        if (kill(pid, s->signo) != -1) {
            return 0;
        }

        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "kill(%P, %d) failed", pid, s->signo);
        s++;
    }

    return 1;
}
master進程接收信號
master進程收到信號後,觸發自定義處理函數。
/*
 * Install the handler for every entry in the global signals[] table
 * using sigaction(2).
 * Returns NGX_OK on success.  On a sigaction() failure the error is
 * logged; under NGX_VALGRIND it is ignored and installation continues,
 * otherwise NGX_ERROR is returned immediately.
 */
ngx_int_t
ngx_init_signals(ngx_log_t *log)
{
    struct sigaction   act;
    ngx_signal_t      *s;

    for (s = signals; s->signo != 0; s++) {

        ngx_memzero(&act, sizeof(struct sigaction));
        act.sa_handler = s->handler;
        sigemptyset(&act.sa_mask);

        if (sigaction(s->signo, &act, NULL) != -1) {
            continue;
        }

#if (NGX_VALGRIND)
        ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
                      "sigaction(%s) failed, ignored", s->signame);
#else
        ngx_log_error(NGX_LOG_EMERG, log, ngx_errno,
                      "sigaction(%s) failed", s->signame);
        return NGX_ERROR;
#endif
    }

    return NGX_OK;
}
/*
 * Custom handler installed for every entry in signals[].  It looks the
 * incoming signal up in the table, then records the request by setting
 * the matching global flag (ngx_quit, ngx_terminate, ngx_noaccept,
 * ngx_reconfigure) for the process's main loop to act on later.
 * (Article excerpt: the declarations of "sig" and "action" and the
 * closing braces are omitted here.)
 */
void
ngx_signal_handler(int signo)
{
for (sig = signals; sig->signo != 0; sig++) {
if (sig->signo == signo) {
break;
}
}
ngx_time_sigsafe_update();
action = "";
/* react to the received signal according to the process role */
switch (ngx_process) {
case NGX_PROCESS_MASTER:
case NGX_PROCESS_SINGLE:
switch (signo) {
case ngx_signal_value(NGX_SHUTDOWN_SIGNAL):
ngx_quit = 1;
action = ", shutting down";
break;
case ngx_signal_value(NGX_TERMINATE_SIGNAL):
case SIGINT:
ngx_terminate = 1;
action = ", exiting";
break;
case ngx_signal_value(NGX_NOACCEPT_SIGNAL):
/* only honored when running daemonized */
if (ngx_daemonized) {
ngx_noaccept = 1;
action = ", stop accepting connections";
}
break;
/* "nginx -s reload": flag the master to re-read the configuration */
case ngx_signal_value(NGX_RECONFIGURE_SIGNAL):
ngx_reconfigure = 1;
action = ", reconfiguring";
break;
//... ...
}
master 進程在收到HUP信號後,將ngx_reconfigure 置爲1。等待master和worker進程進行處理。
master和worker進程間通信
nginx在收到reload命令後,master進程會重新獲取配置文件,加載各個模塊,fork新的worker進程。而原先的worker進程在處理完所有連接任務後,自動銷燬。
創建worker進程之前,會先調用socketpair,創建一個socket對,這對socket就是用來進行master和worker進程間通信的。
進程結構體定義:
/*
 * Per-process record the master keeps for each spawned process.
 */
typedef struct {
ngx_pid_t pid;                  /* process ID */
int status;                     /* exit status -- presumably from waitpid(); not shown here */
ngx_socket_t channel[2];        /* socketpair used for master/worker IPC */
ngx_spawn_proc_pt proc;         /* entry function run in the forked child */
void *data;                     /* argument handed to proc */
char *name;                     /* process name used in log messages */
unsigned respawn:1;
unsigned just_spawn:1;
unsigned detached:1;
unsigned exiting:1;             /* worker has been told to exit (see ngx_signal_worker_processes) */
unsigned exited:1;              /* process no longer exists */
} ngx_process_t;
- fork子進程
ngx_processes[s].channel[0]用於master進程寫入,ngx_processes[s].channel[1]用於worker進程監聽socket
/*
 * ngx_spawn_process (excerpt): creates the master/worker communication
 * channel, then forks the worker.  channel[0] stays with the master
 * for writing; channel[1] is inherited by the worker, which listens
 * on it (saved in the global ngx_channel).
 */
ngx_spawn_process
/* create the UNIX-domain socketpair used for master<->worker IPC */
if (socketpair(AF_UNIX, SOCK_STREAM, 0, ngx_processes[s].channel) == -1)
{
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"socketpair() failed while spawning \"%s\"", name);
return NGX_INVALID_PID;
}
/* both ends are switched to non-blocking mode (error paths elided here) */
if (ngx_nonblocking(ngx_processes[s].channel[0]) == -1)
if (ngx_nonblocking(ngx_processes[s].channel[1]) == -1)
/* the worker's end of the channel */
ngx_channel = ngx_processes[s].channel[1];
pid = fork();
switch (pid) {
case -1:
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"fork() failed while spawning \"%s\"", name);
ngx_close_channel(ngx_processes[s].channel, cycle->log);
return NGX_INVALID_PID;
case 0:
/* child: record own PID and run the worker entry function */
ngx_pid = ngx_getpid();
proc(cycle, data);
break;
default:
/* parent (master) continues */
break;
}
2 worker進程執行函數,將channel加入epoll fd。
epoll句柄
/*
 * Action table registered by the epoll event module: the function
 * pointers the event core calls to manage configuration, add/remove
 * events and connections, and drive the event loop.
 */
ngx_event_module_t ngx_epoll_module_ctx = {
&epoll_name,
ngx_epoll_create_conf, /* create configuration */
ngx_epoll_init_conf, /* init configuration */
{
ngx_epoll_add_event, /* add an event */
ngx_epoll_del_event, /* delete an event */
ngx_epoll_add_event, /* enable an event */
ngx_epoll_del_event, /* disable an event */
ngx_epoll_add_connection, /* add a connection */
ngx_epoll_del_connection, /* delete a connection */
NULL, /* process the changes */
ngx_epoll_process_events, /* process the events */
ngx_epoll_init, /* init the events */
ngx_epoll_done, /* done the events */
}
};
在worker進程循環前,調用ngx_worker_process_init將socket通信套接字加入epoll中。
worker進程處理:
/*
 * ngx_worker_process_cycle (excerpt): the worker's main loop.
 * ngx_worker_process_init() registers the channel socket with the
 * event mechanism before the loop starts (see ngx_add_channel_event).
 * (Article excerpt: declarations and closing braces are omitted.)
 */
ngx_worker_process_cycle
ngx_worker_process_init(cycle, worker);
for ( ;; ) {
/* graceful shutdown: drain idle connections before exiting */
if (ngx_exiting) {
c = cycle->connections;
for (i = 0; i < cycle->connection_n; i++) {
/* THREAD: lock */
/* mark the connection for close and fire its read handler */
if (c[i].fd != -1 && c[i].idle) {
c[i].close = 1;
c[i].read->handler(c[i].read);
}
}
ngx_process_events_and_timers(cycle);
}
將channel添加到epoll中
/*
 * Register one end of the master/worker socketpair ("channel") with
 * the event mechanism, so the process is notified of channel traffic
 * and "handler" is invoked for the requested event type.
 * Returns NGX_OK on success, NGX_ERROR when no connection slot is
 * available or the event/connection registration fails.
 */
ngx_int_t
ngx_add_channel_event(ngx_cycle_t *cycle, ngx_fd_t fd, ngx_int_t event,
ngx_event_handler_pt handler)
{
    ngx_connection_t  *c;
    ngx_event_t       *ev;

    c = ngx_get_connection(fd, cycle->log);

    if (c == NULL) {
        return NGX_ERROR;
    }

    c->pool = cycle->pool;

    /* mark both events as channel events and attach the cycle log */
    c->read->log = cycle->log;
    c->read->channel = 1;

    c->write->log = cycle->log;
    c->write->channel = 1;

    if (event == NGX_READ_EVENT) {
        ev = c->read;
    } else {
        ev = c->write;
    }

    ev->handler = handler;

    if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

    } else {
        if (ngx_add_event(ev, event, 0) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
3 發送消息
如果向channel中寫入消息失敗,則調用kill函數,直接向對應的PID進程發送信號。
/*
 * ngx_signal_worker_processes (excerpt): tell each worker what to do.
 * The command is first written over the channel; only when that write
 * fails (or no channel command maps to the signal) is kill(2) used to
 * deliver the raw signal to the worker's PID.
 */
ngx_signal_worker_processes(ngx_cycle_t *cycle, int signo)
if (ch.command) {
if (ngx_write_channel(ngx_processes[i].channel[0],
&ch, sizeof(ngx_channel_t), cycle->log)
== NGX_OK)
{
/* any command other than "reopen" means this worker will exit */
if (signo != ngx_signal_value(NGX_REOPEN_SIGNAL)) {
ngx_processes[i].exiting = 1;
}
continue;
}
}
ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
"kill (%P, %d)", ngx_processes[i].pid, signo);
/* channel write failed: fall back to a real signal */
if (kill(ngx_processes[i].pid, signo) == -1) {
err = ngx_errno;
ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
"kill(%P, %d) failed", ngx_processes[i].pid, signo);
/* ESRCH: the worker is already gone -- mark it for reaping */
if (err == NGX_ESRCH) {
ngx_processes[i].exited = 1;
ngx_processes[i].exiting = 0;
ngx_reap = 1;
}
continue;
}
4 處理消息
/*
 * ngx_channel_handler (excerpt): read handler attached to the worker's
 * channel socket; loops draining ngx_channel_t commands sent by the
 * master.
 */
static void
ngx_channel_handler(ngx_event_t *ev)
for ( ;; ) {
n = ngx_read_channel(c->fd, &ch, sizeof(ngx_channel_t), ev->log);
}
worker進程間通信
如上所述,master進程和worker進程的通信利用的是socketpair,因爲master和worker之間的通信一般都是“reload”“stop”“start”之類的命令,對效率要求不是很高。而worker進程之間的通信,一般都發生在一個http處理過程中,而且相對比較頻繁。nginx採用了最快的通信方式:共享內存,通過在共享內存上創建互斥鎖、全局變量的方式,進行worker進程間通信。
利用互斥鎖解決驚羣問題
master創建共享內存,worker進程共享。爲了解決驚羣問題,nginx利用共享內存創建互斥鎖,同一時間,只有獲取到互斥鎖的worker進程才能accept到新的連接。
ngx_shmtx_t ngx_accept_mutex; /* mutex in shared memory guarding accept */
/*
 * Try to take the shared accept mutex.  Only the worker holding it
 * has the listening sockets registered in its event loop, so a new
 * connection wakes exactly one worker (the thundering-herd fix).
 * Returns NGX_OK whether or not the lock was obtained; NGX_ERROR only
 * when enabling/disabling accept events fails.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
if (ngx_shmtx_trylock(&ngx_accept_mutex)) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"accept mutex locked");
/* held it last round and accept events are still armed: nothing to do */
if (ngx_accept_mutex_held
&& ngx_accept_events == 0
&& !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
{
return NGX_OK;
}
/* newly acquired: start listening for connections; on failure, release */
if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
ngx_shmtx_unlock(&ngx_accept_mutex);
return NGX_ERROR;
}
ngx_accept_events = 0;
ngx_accept_mutex_held = 1;
return NGX_OK;
}
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
"accept mutex lock failed: %ui", ngx_accept_mutex_held);
/* lost the race this round: stop accepting if we previously held the lock */
if (ngx_accept_mutex_held) {
if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
return NGX_ERROR;
}
ngx_accept_mutex_held = 0;
}
return NGX_OK;
}
總結
以上就是nginx中採用的所有進程間通信方式,個人感覺都是爲了一個目標,效率。代碼的設計上很值得學習。