进程架构
nginx作为高性能的web服务器,采用的是多进程的方式,由一个master进程和若干个worker进程组成。作为web服务器,nginx的设计从头至尾体现两个字:性能。一般推荐worker进程数和机器的CPU核数相同。这样可以最大限度提升性能。
nginx的进程间通信主要涉及三个方面:master与操作系统的通信,master和worker进程的通信,worker进程与worker进程的通信。
User向master进程发送信号
nginx几乎重新定义了所有系统消息的处理函数。用户可以通过执行“nginx -s reload”的方式,调用kill函数对master进程发送特定信号。master进程的自定义处理函数随即被触发。
nginx自定义命令和对应的消息类型,处理函数:
/* Table mapping nginx "-s <name>" command names to OS signal numbers and
 * their handler. Every entry shares the single handler ngx_signal_handler.
 * NOTE(review): excerpt — the remaining entries and the closing "};" are
 * omitted in this article. */
ngx_signal_t signals[] = {
/* "nginx -s reload" -> SIGHUP: re-read configuration */
{ ngx_signal_value(NGX_RECONFIGURE_SIGNAL),
"SIG" ngx_value(NGX_RECONFIGURE_SIGNAL),
"reload",
ngx_signal_handler },
/* "nginx -s reopen" -> SIGUSR1: reopen log files */
{ ngx_signal_value(NGX_REOPEN_SIGNAL),
"SIG" ngx_value(NGX_REOPEN_SIGNAL),
"reopen",
ngx_signal_handler },
/* SIGWINCH: stop accepting connections (no command-line name) */
{ ngx_signal_value(NGX_NOACCEPT_SIGNAL),
"SIG" ngx_value(NGX_NOACCEPT_SIGNAL),
"",
ngx_signal_handler },
/* "nginx -s stop" -> SIGTERM: fast shutdown */
{ ngx_signal_value(NGX_TERMINATE_SIGNAL),
"SIG" ngx_value(NGX_TERMINATE_SIGNAL),
"stop",
ngx_signal_handler },
在调用“nginx -s reload”后,会在main函数中触发ngx_signal_process,向master发送信号。
/* Simplified main(): install the custom signal handlers, then, when nginx
 * was started as "nginx -s <signal>" (ngx_signal is set during option
 * parsing), deliver that signal to the running master process and exit. */
int main()
{
// ... code omitted ...
if (ngx_init_signals(cycle->log) != NGX_OK) {
return 1;
}
// ... code omitted ...
/* "-s" mode: act as a control client instead of starting a server */
if (ngx_signal) {
return ngx_signal_process(cycle, ngx_signal);
}
// ... code omitted ...
}
/* Implements "nginx -s <sig>": read the master process PID from the pid
 * file named in the core configuration, then forward the signal name and
 * PID to ngx_os_signal_process. Returns 0 on success, 1 on any failure
 * (these are process exit codes, not NGX_OK/NGX_ERROR). */
ngx_int_t
ngx_signal_process(ngx_cycle_t *cycle, char *sig)
{
ssize_t n;
ngx_int_t pid;
ngx_file_t file;
ngx_core_conf_t *ccf;
u_char buf[NGX_INT64_LEN + 2];
ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "signal process started");
ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
ngx_memzero(&file, sizeof(ngx_file_t));
file.name = ccf->pid;
file.log = cycle->log;
// read the master process PID from the pid file
file.fd = ngx_open_file(file.name.data, NGX_FILE_RDONLY,
NGX_FILE_OPEN, NGX_FILE_DEFAULT_ACCESS);
if (file.fd == NGX_INVALID_FILE) {
ngx_log_error(NGX_LOG_ERR, cycle->log, ngx_errno,
ngx_open_file_n " \"%s\" failed", file.name.data);
return 1;
}
/* buf holds the decimal PID plus an optional CR/LF pair */
n = ngx_read_file(&file, buf, NGX_INT64_LEN + 2, 0);
if (ngx_close_file(file.fd) == NGX_FILE_ERROR) {
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
ngx_close_file_n " \"%s\" failed", file.name.data);
}
if (n == NGX_ERROR) {
return 1;
}
/* strip trailing CR/LF; afterwards n is the digit count */
while (n-- && (buf[n] == CR || buf[n] == LF)) { /* void */ }
pid = ngx_atoi(buf, ++n);
if (pid == NGX_ERROR) {
ngx_log_error(NGX_LOG_ERR, cycle->log, 0,
"invalid PID number \"%*s\" in \"%s\"",
n, buf, file.name.data);
return 1;
}
// hand the PID and the signal name over to ngx_os_signal_process
return ngx_os_signal_process(cycle, sig, pid);
}
/*
 * Look up the signals[] entry whose command name matches "name" and
 * deliver the corresponding signal to "pid" (the master process) via
 * kill(). Returns 0 once a signal has been delivered; returns 1 when
 * the name is unknown or every kill() attempt failed (failures are
 * logged and the scan continues).
 */
ngx_int_t
ngx_os_signal_process(ngx_cycle_t *cycle, char *name, ngx_int_t pid)
{
    ngx_signal_t  *entry;

    for (entry = signals; entry->signo != 0; entry++) {

        if (ngx_strcmp(name, entry->name) != 0) {
            continue;
        }

        if (kill(pid, entry->signo) != -1) {
            return 0;   /* delivered */
        }

        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "kill(%P, %d) failed", pid, entry->signo);
    }

    return 1;
}
master进程接收信号
master进程收到信号后,触发自定义处理函数。
/*
 * Register the shared handler for every entry in the signals[] table via
 * sigaction(), replacing the default disposition of each signal nginx
 * cares about. Under NGX_VALGRIND a failing sigaction() is only logged;
 * otherwise it is fatal and NGX_ERROR is returned.
 */
ngx_int_t
ngx_init_signals(ngx_log_t *log)
{
    struct sigaction  sa;
    ngx_signal_t     *s;

    for (s = signals; s->signo != 0; s++) {

        ngx_memzero(&sa, sizeof(struct sigaction));
        sigemptyset(&sa.sa_mask);
        sa.sa_handler = s->handler;

        if (sigaction(s->signo, &sa, NULL) != -1) {
            continue;
        }

#if (NGX_VALGRIND)
        ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
                      "sigaction(%s) failed, ignored", s->signame);
#else
        ngx_log_error(NGX_LOG_EMERG, log, ngx_errno,
                      "sigaction(%s) failed", s->signame);
        return NGX_ERROR;
#endif
    }

    return NGX_OK;
}
/* Shared handler installed for every entry in signals[]: translate the
 * delivered signal into a global flag that the master/worker main loop
 * inspects later (handlers themselves do minimal, async-signal-safe work).
 * NOTE(review): excerpt — the declarations of "sig" and "action" and the
 * rest of the function are omitted in this article. */
void
ngx_signal_handler(int signo)
{
/* locate the signals[] entry for the delivered signal */
for (sig = signals; sig->signo != 0; sig++) {
if (sig->signo == signo) {
break;
}
}
/* refresh cached time using only async-signal-safe operations */
ngx_time_sigsafe_update();
action = "";
// dispatch on the received signal
switch (ngx_process) {
case NGX_PROCESS_MASTER:
case NGX_PROCESS_SINGLE:
switch (signo) {
case ngx_signal_value(NGX_SHUTDOWN_SIGNAL):
ngx_quit = 1;
action = ", shutting down";
break;
case ngx_signal_value(NGX_TERMINATE_SIGNAL):
case SIGINT:
ngx_terminate = 1;
action = ", exiting";
break;
case ngx_signal_value(NGX_NOACCEPT_SIGNAL):
if (ngx_daemonized) {
ngx_noaccept = 1;
action = ", stop accepting connections";
}
break;
// handle "reload" (SIGHUP): request reconfiguration
case ngx_signal_value(NGX_RECONFIGURE_SIGNAL):
ngx_reconfigure = 1;
action = ", reconfiguring";
break;
//... ...
}
master 进程在收到HUP信号后,将ngx_reconfigure 置为1。等待master和worker进程进行处理。
master和worker进程间通信
nginx在收到reload命令后,master进程会重新获取配置文件,加载各个模块,fork新的worker进程。而原先的worker进程在处理完所有连接任务后,自动销毁。
创建worker进程之前,会先调用socketpair创建一个socket对,这对socket就是用来进行master和worker进程间通信的。
进程结构体定义:
/* Per-child bookkeeping kept by the master in the ngx_processes[] table. */
typedef struct {
ngx_pid_t pid; /* child process id */
int status; /* exit status collected by waitpid */
ngx_socket_t channel[2]; /* socketpair for master<->worker IPC */
ngx_spawn_proc_pt proc; /* entry function the child runs */
void *data; /* argument passed to proc */
char *name; /* process title, e.g. "worker process" */
unsigned respawn:1; /* restart the child if it dies */
unsigned just_spawn:1; /* newly forked in this spawn round */
unsigned detached:1; /* not connected via channel (e.g. upgrade) */
unsigned exiting:1; /* shutdown requested, child draining */
unsigned exited:1; /* child has been reaped */
} ngx_process_t;
- fork子进程
ngx_processes[s].channel[0]用于master进程写入,ngx_processes[s].channel[1]用于worker进程监听socket
/* Excerpt from ngx_spawn_process: create the IPC socketpair, then fork.
 * channel[0] stays with the master (write side); channel[1] is inherited
 * by the worker (read side, stored in the global ngx_channel).
 * NOTE(review): the surrounding signature, error-handling bodies of the
 * ngx_nonblocking() checks, and the function tail are omitted here. */
ngx_spawn_process
if (socketpair(AF_UNIX, SOCK_STREAM, 0, ngx_processes[s].channel) == -1)
{
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"socketpair() failed while spawning \"%s\"", name);
return NGX_INVALID_PID;
}
/* both ends are made non-blocking so channel I/O never stalls a process */
if (ngx_nonblocking(ngx_processes[s].channel[0]) == -1)
if (ngx_nonblocking(ngx_processes[s].channel[1]) == -1)
ngx_channel = ngx_processes[s].channel[1];
pid = fork();
switch (pid) {
case -1:
ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
"fork() failed while spawning \"%s\"", name);
ngx_close_channel(ngx_processes[s].channel, cycle->log);
return NGX_INVALID_PID;
case 0:
/* child: record own pid and enter the worker entry function */
ngx_pid = ngx_getpid();
proc(cycle, data);
break;
default:
/* parent (master): fall through and record the child */
break;
}
2 worker进程执行函数,将channel加入epoll。
epoll句柄
/* Event-module interface of the epoll module: the function table through
 * which generic event code (ngx_add_event/ngx_add_conn etc.) reaches the
 * epoll-specific implementations. */
ngx_event_module_t ngx_epoll_module_ctx = {
&epoll_name,
ngx_epoll_create_conf, /* create configuration */
ngx_epoll_init_conf, /* init configuration */
{
ngx_epoll_add_event, /* add an event */
ngx_epoll_del_event, /* delete an event */
ngx_epoll_add_event, /* enable an event */
ngx_epoll_del_event, /* disable an event */
ngx_epoll_add_connection, /* add a connection */
ngx_epoll_del_connection, /* delete a connection */
NULL, /* process the changes */
ngx_epoll_process_events, /* process the events */
ngx_epoll_init, /* init the events */
ngx_epoll_done, /* done the events */
}
};
在worker进程循环前,调用ngx_worker_process_init将socket通信套接字加入epoll中。
worker进程处理:
/* Excerpt from ngx_worker_process_cycle: after per-worker initialization
 * (which also registers the channel fd with epoll), the worker loops on
 * ngx_process_events_and_timers. When ngx_exiting is set it first marks
 * idle connections for close so the process can drain and terminate.
 * NOTE(review): excerpt — braces are not balanced in this quotation. */
ngx_worker_process_cycle
ngx_worker_process_init(cycle, worker);
for ( ;; ) {
if (ngx_exiting) {
c = cycle->connections;
for (i = 0; i < cycle->connection_n; i++) {
/* THREAD: lock */
/* mark each idle connection closed and fire its read handler
 * so it is torn down before the worker exits */
if (c[i].fd != -1 && c[i].idle) {
c[i].close = 1;
c[i].read->handler(c[i].read);
}
}
ngx_process_events_and_timers(cycle);
}
将channel添加到epoll中
/*
 * Wrap the channel descriptor "fd" in an ngx_connection_t and register
 * it with the event mechanism so the worker is woken up on channel
 * traffic. "event" selects the read or write event and "handler" becomes
 * its callback. Returns NGX_OK, or NGX_ERROR when no connection slot is
 * available or registration fails (the slot is released on failure).
 */
ngx_int_t
ngx_add_channel_event(ngx_cycle_t *cycle, ngx_fd_t fd, ngx_int_t event,
ngx_event_handler_pt handler)
{
    ngx_connection_t  *c;
    ngx_event_t       *ev;

    c = ngx_get_connection(fd, cycle->log);
    if (c == NULL) {
        return NGX_ERROR;
    }

    c->pool = cycle->pool;

    /* both events are flagged as channel events and share the cycle log */
    c->read->log = cycle->log;
    c->read->channel = 1;
    c->write->log = cycle->log;
    c->write->channel = 1;

    if (event == NGX_READ_EVENT) {
        ev = c->read;
    } else {
        ev = c->write;
    }

    ev->handler = handler;

    /* prefer whole-connection registration except on epoll, where the
     * single event is added directly */
    if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

    } else {
        if (ngx_add_event(ev, event, 0) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
3 发送消息
如果向channel中写入消息失败,则调用kill函数,直接向对应的PID进程发送信号。
/* Excerpt from ngx_signal_worker_processes: for each worker, first try to
 * deliver the command over the channel socketpair; only if that is not
 * possible (or the write fails) fall back to kill() on the worker's PID.
 * NOTE(review): excerpt — the enclosing loop and function braces are
 * omitted in this quotation. */
ngx_signal_worker_processes(ngx_cycle_t *cycle, int signo)
/* ch.command is non-zero when the signal maps to a channel command */
if (ch.command) {
if (ngx_write_channel(ngx_processes[i].channel[0],
&ch, sizeof(ngx_channel_t), cycle->log)
== NGX_OK)
{
/* anything except "reopen logs" means the worker will exit */
if (signo != ngx_signal_value(NGX_REOPEN_SIGNAL)) {
ngx_processes[i].exiting = 1;
}
continue;
}
}
ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
"kill (%P, %d)", ngx_processes[i].pid, signo);
/* fallback: deliver the raw signal directly to the worker */
if (kill(ngx_processes[i].pid, signo) == -1) {
err = ngx_errno;
ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
"kill(%P, %d) failed", ngx_processes[i].pid, signo);
/* ESRCH: the worker is already gone; schedule reaping */
if (err == NGX_ESRCH) {
ngx_processes[i].exited = 1;
ngx_processes[i].exiting = 0;
ngx_reap = 1;
}
continue;
}
4 处理消息
/* Excerpt from ngx_channel_handler: the read-event callback registered by
 * ngx_add_channel_event; it drains all pending commands from the channel.
 * NOTE(review): excerpt — the function body braces and the command
 * dispatch that follows the read are omitted in this quotation. */
static void
ngx_channel_handler(ngx_event_t *ev)
for ( ;; ) {
n = ngx_read_channel(c->fd, &ch, sizeof(ngx_channel_t), ev->log);
}
worker进程间通信
如上所述,master进程和worker进程的通信利用的是socketpair,因为master和worker之间的通信一般都是“reload”“stop”“start”之类的命令,对效率要求不是很高。而worker进程之间的通信,一般都发生在一个http处理过程中,而且相对比较频繁。nginx采用了最快的通信方式——共享内存,通过在共享内存上创建互斥锁、全局变量的方式,进行worker进程间通信。
利用互斥锁解决惊群问题
master创建共享内存,worker进程共享。为了解决惊群问题,nginx利用共享内存创建互斥锁,同一时间,只有获取到互斥锁的worker进程才能accept到新的连接。
ngx_shmtx_t ngx_accept_mutex; /* shared-memory mutex serializing accept() across workers */
/*
 * Contend for the shared-memory accept mutex. The winner registers the
 * listening sockets with its event mechanism (ngx_enable_accept_events)
 * and records ownership in ngx_accept_mutex_held; a loser that held the
 * mutex previously deregisters them. Only the holder accepts new
 * connections, which avoids the thundering-herd problem.
 * Returns NGX_OK whether or not the lock was obtained; NGX_ERROR only
 * when enabling/disabling the accept events fails.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (!ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex lock failed: %ui", ngx_accept_mutex_held);

        /* lost the race: stop listening if we were the previous holder */
        if (ngx_accept_mutex_held) {
            if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ngx_accept_mutex_held = 0;
        }

        return NGX_OK;
    }

    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex locked");

    /* already holding the lock with accept events active: nothing to do */
    if (ngx_accept_mutex_held
        && ngx_accept_events == 0
        && !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
    {
        return NGX_OK;
    }

    if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
        return NGX_ERROR;
    }

    ngx_accept_events = 0;
    ngx_accept_mutex_held = 1;

    return NGX_OK;
}
总结
以上就是nginx中采用的所有进程间通信方式,个人感觉都是为了一个目标,效率。代码的设计上很值得学习。