nginx & apache

request->nginx->index模块->配置文件->response

##0.基础

http://garydai.github.io/2013/06/01/IO.html

http://garydai.github.io/2015/01/29/socket.html

##1.框架

struct ngx_cycle_s {
    void                  ****conf_ctx;
    ngx_pool_t               *pool;

    ngx_log_t                *log;
    ngx_log_t                 new_log;

    ngx_connection_t        **files;
    ngx_connection_t         *free_connections;
    ngx_uint_t                free_connection_n;

    ngx_array_t               listening;
    ngx_array_t               pathes;
    ngx_list_t                open_files;
    ngx_list_t                shared_memory;

    ngx_uint_t                connection_n;
    ngx_uint_t                files_n;

    ngx_connection_t         *connections;
    ngx_event_t              *read_events;
    ngx_event_t              *write_events;

    ngx_cycle_t              *old_cycle;

    ngx_str_t                 conf_file;
    ngx_str_t                 conf_param;
    ngx_str_t                 conf_prefix;
    ngx_str_t                 prefix;
    ngx_str_t                 lock_file;
    ngx_str_t                 hostname;
};

cycle = ngx_init_cycle(&init_cycle);
1.1.main函数里调用ngx_init_cycle初始化一个主进程实例,并创建监听套接字



ngx_master_process_cycle(cycle);
1.2.创建worker进程

1.3.worker进程竞争accept锁

1.4.某一进程accept事件发生

1.5.取空闲的连接

##2.主要数据结构

struct ngx_listening_s {
    ngx_socket_t        fd;

    struct sockaddr    *sockaddr;
    socklen_t           socklen;    /* size of sockaddr */
    size_t              addr_text_max_len;
    ngx_str_t           addr_text;

    int                 type;

    int                 backlog;
    int                 rcvbuf;
    int                 sndbuf;

    /* handler of accepted connection */
    ngx_connection_handler_pt   handler;

    void               *servers;  /* array of ngx_http_in_addr_t, for example */

    ngx_log_t           log;
    ngx_log_t          *logp;

    size_t              pool_size;
    /* should be here because of the AcceptEx() preread */
    size_t              post_accept_buffer_size;
    /* should be here because of the deferred accept */
    ngx_msec_t          post_accept_timeout;

    ngx_listening_t    *previous;
    ngx_connection_t   *connection;

    unsigned            open:1;
    unsigned            remain:1;
    unsigned            ignore:1;

    unsigned            bound:1;       /* already bound */
    unsigned            inherited:1;   /* inherited from previous process */
    unsigned            nonblocking_accept:1;
    unsigned            listen:1;
    unsigned            nonblocking:1;
    unsigned            shared:1;    /* shared between threads or processes */
    unsigned            addr_ntop:1;

#if (NGX_HAVE_INET6 && defined IPV6_V6ONLY)
    unsigned            ipv6only:2;
#endif

#if (NGX_HAVE_DEFERRED_ACCEPT)
    unsigned            deferred_accept:1;
    unsigned            delete_deferred:1;
    unsigned            add_deferred:1;
#ifdef SO_ACCEPTFILTER
    char               *accept_filter;
#endif
#endif

};

struct ngx_connection_s {
    void               *data;
    ngx_event_t        *read;
    ngx_event_t        *write;

    ngx_socket_t        fd;

    ngx_recv_pt         recv;
    ngx_send_pt         send;
    ngx_recv_chain_pt   recv_chain;
    ngx_send_chain_pt   send_chain;

    ngx_listening_t    *listening;

    off_t               sent;

    ngx_log_t          *log;

    ngx_pool_t         *pool;

    struct sockaddr    *sockaddr;
    socklen_t           socklen;
    ngx_str_t           addr_text;

#if (NGX_SSL)
    ngx_ssl_connection_t  *ssl;
#endif

    struct sockaddr    *local_sockaddr;
    socklen_t           local_socklen;

    ngx_buf_t          *buffer;

    ngx_atomic_uint_t   number;

    ngx_uint_t          requests;

    unsigned            buffered:8;

    unsigned            log_error:3;     /* ngx_connection_log_error_e */

    unsigned            single_connection:1;
    unsigned            unexpected_eof:1;
    unsigned            timedout:1;
    unsigned            error:1;
    unsigned            destroyed:1;

    unsigned            idle:1;
    unsigned            close:1;

    unsigned            sendfile:1;
    unsigned            sndlowat:1;
    unsigned            tcp_nodelay:2;   /* ngx_connection_tcp_nodelay_e */
    unsigned            tcp_nopush:2;    /* ngx_connection_tcp_nopush_e */

#if (NGX_HAVE_IOCP)
    unsigned            accept_context_updated:1;
#endif

#if (NGX_THREADS)
    ngx_atomic_t        lock;
#endif
};



struct ngx_event_s {
    void            *data;

    unsigned         write:1;

    unsigned         accept:1;

    /* used to detect the stale events in kqueue, rtsig, and epoll */
    unsigned         instance:1;

    /*
     * the event was passed or would be passed to a kernel;
     * in aio mode - operation was posted.
     */
    unsigned         active:1;

    unsigned         disabled:1;

    /* the ready event; in aio mode 0 means that no operation can be posted */
    unsigned         ready:1;

    unsigned         oneshot:1;

    /* aio operation is complete */
    unsigned         complete:1;

    unsigned         eof:1;
    unsigned         error:1;

    unsigned         timedout:1;
    unsigned         timer_set:1;

    unsigned         delayed:1;

    unsigned         read_discarded:1;

    unsigned         unexpected_eof:1;

    unsigned         deferred_accept:1;

    /* the pending eof reported by kqueue or in aio chain operation */
    unsigned         pending_eof:1;

#if !(NGX_THREADS)
    unsigned         posted_ready:1;
#endif

#if (NGX_WIN32)
    /* setsockopt(SO_UPDATE_ACCEPT_CONTEXT) was succesfull */
    unsigned         accept_context_updated:1;
#endif

#if (NGX_HAVE_KQUEUE)
    unsigned         kq_vnode:1;

    /* the pending errno reported by kqueue */
    int              kq_errno;
#endif

    /*
     * kqueue only:
     *   accept:     number of sockets that wait to be accepted
     *   read:       bytes to read when event is ready
     *               or lowat when event is set with NGX_LOWAT_EVENT flag
     *   write:      available space in buffer when event is ready
     *               or lowat when event is set with NGX_LOWAT_EVENT flag
     *
     * iocp: TODO
     *
     * otherwise:
     *   accept:     1 if accept many, 0 otherwise
     */

#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_IOCP)
    int              available;
#else
    unsigned         available:1;
#endif

    ngx_event_handler_pt  handler;


#if (NGX_HAVE_AIO)

#if (NGX_HAVE_IOCP)
    ngx_event_ovlp_t ovlp;
#else
    struct aiocb     aiocb;
#endif

#endif

    ngx_uint_t       index;

    ngx_log_t       *log;

    ngx_rbtree_node_t   timer;

    unsigned         closed:1;

    /* to test on worker exit */
    unsigned         channel:1;
    unsigned         resolver:1;

#if (NGX_THREADS)

    unsigned         locked:1;

    unsigned         posted_ready:1;
    unsigned         posted_timedout:1;
    unsigned         posted_eof:1;

#if (NGX_HAVE_KQUEUE)
    /* the pending errno reported by kqueue */
    int              posted_errno;
#endif

#if (NGX_HAVE_KQUEUE) || (NGX_HAVE_IOCP)
    int              posted_available;
#else
    unsigned         posted_available:1;
#endif

    ngx_atomic_t    *lock;
    ngx_atomic_t    *own_lock;

#endif

    /* the links of the posted queue */
    ngx_event_t     *next;
    ngx_event_t    **prev;


#if 0

    /* the threads support */

    /*
     * the event thread context, we store it here
     * if $(CC) does not understand __thread declaration
     * and pthread_getspecific() is too costly
     */

    void            *thr_ctx;

#if (NGX_EVENT_T_PADDING)

    /* event should not cross cache line in SMP */

    uint32_t         padding[NGX_EVENT_T_PADDING];
#endif
#endif
};

connection

nginx每个socket被封装成connection结构ngx_connection_t,ngx_connection_t里还包含socket的读写事件ngx_event_t,及事件发生时调用的回调函数。

event

ngx_event_t

##2.模块

nginx框架由模块组成

struct ngx_module_s {
    ngx_uint_t            ctx_index;
    ngx_uint_t            index;

    ngx_uint_t            spare0;
    ngx_uint_t            spare1;
    ngx_uint_t            spare2;
    ngx_uint_t            spare3;

    ngx_uint_t            version;

    void                 *ctx;
    ngx_command_t        *commands;
    ngx_uint_t            type;

    ngx_int_t           (*init_master)(ngx_log_t *log);

    ngx_int_t           (*init_module)(ngx_cycle_t *cycle);

    ngx_int_t           (*init_process)(ngx_cycle_t *cycle);
    ngx_int_t           (*init_thread)(ngx_cycle_t *cycle);
    void                (*exit_thread)(ngx_cycle_t *cycle);
    void                (*exit_process)(ngx_cycle_t *cycle);

    void                (*exit_master)(ngx_cycle_t *cycle);

    uintptr_t             spare_hook0;
    uintptr_t             spare_hook1;
    uintptr_t             spare_hook2;
    uintptr_t             spare_hook3;
    uintptr_t             spare_hook4;
    uintptr_t             spare_hook5;
    uintptr_t             spare_hook6;
    uintptr_t             spare_hook7;
};

解析模块conf,再初始化模块的配置结构init_conf

2.1.1.handlers : 处理客户端请求并产生响应内容,比如ngx_http_static_module模块,负责客户端的静态页面请求,并将对应的静态磁盘文件作为响应内容输出.

2.1.2.filters : 对handlers产生的响应内容做各种过滤处理(即增,删,改),比如 ngx_http_not_modified_filter_module,如果通过时间判断前后2次请求的响应内容没有发生任何改变,那么可以直接响应”304 Not Modified”状态标识,让客户端使用缓存即可,而原本发送的响应内容将被清除掉.

2.1.3.upstream : 如果存在后端真实的服务器,nginx 可以利用upstream模块充当反向代理的角色,对客户端的请求只负责转发到后端的真实服务器,如ngx_http_proxy_module模块.

2.1.4.load-balance : 在nginx充当中间代理时,由于后端真实服务器往往多于一个,对于某一次客户端的请求,如何选择对应的后端真实服务器来进行处理,这就有类似于ngx_http_upstream_ip_hash_module这样的模块来实现不同的负载均衡算法(Load Balance)。

3.1.处理连接事件模块

3.1.ngx_events_module事件模块

nginx的事件模块包括ngx_event_core_module、ngx_epoll_module以及其他IO实现类型的模块

3.2.每个事件模块对应一个事件模块实例ngx_event_module_t

###2.1.事件模块

事件模型的初始化与http模块类似,由ngx_events_module驱动整个事件模块的解析和初始化,ngx_event_core_module对events块大部分指令的解析保存重要的配置信息。

nginx的IO属于事件模块

typedef struct {
    ngx_int_t  (*add)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);  //将某描述符的某个事件(可读/可写)添加到多路复用监控里
    ngx_int_t  (*del)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);  //将某描述符的某个事件(可读/可写)从多路复用监控里删除 

    ngx_int_t  (*enable)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);  //启动对某个事件的监控
    ngx_int_t  (*disable)(ngx_event_t *ev, ngx_int_t event, ngx_uint_t flags);  //禁止对某个事件的监控

    ngx_int_t  (*add_conn)(ngx_connection_t *c);    //将指定的连接关联的描述符添加到多路复用的监控里
    ngx_int_t  (*del_conn)(ngx_connection_t *c, ngx_uint_t flags);//将指定的连接关联的描述符从多路复用的监控里删除
    ngx_int_t (*process_changes)(ngx_cycle_t *cycle, ngx_uint_t nowait);//只有kqueue用到
    ngx_int_t (*process_events)(ngx_cycle_t *cycle, ngx_msec_t timer, ngx_uint_t flags);  //阻塞等待事件发生,对发生的事件进行逐个处理

    ngx_int_t (*init)(ngx_cycle_t *cycle, ngx_msec_t timer); //初始化 
    void (*done)(ngx_cycle_t *cycle);//回收资源
} ngx_event_actions_t; 

typedef struct {
    ngx_str_t              *name;

    void                 *(*create_conf)(ngx_cycle_t *cycle);
    char                 *(*init_conf)(ngx_cycle_t *cycle, void *conf);

    ngx_event_actions_t     actions;
} ngx_event_module_t;


ngx_module_t  ngx_event_core_module = {
    NGX_MODULE_V1,
    &ngx_event_core_module_ctx,            /* module context */
    ngx_event_core_commands,               /* module directives */
    NGX_EVENT_MODULE,                      /* module type */
    NULL,                                  /* init master */
    ngx_event_module_init,                 /* init module */
    ngx_event_process_init,                /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};


ngx_module_t  ngx_epoll_module = {
    NGX_MODULE_V1,
    &ngx_epoll_module_ctx,               /* module context */
    ngx_epoll_commands,                  /* module directives */
    NGX_EVENT_MODULE,                    /* module type */
    NULL,                                /* init master */
    NULL,                                /* init module */
    NULL,                                /* init process */
    NULL,                                /* init thread */
    NULL,                                /* exit thread */
    NULL,                                /* exit process */
    NULL,                                /* exit master */
    NGX_MODULE_V1_PADDING
};

nginx有两个事件模块,ngx_event_core_module和ngx_epoll_module,ngx_event_core_module主要用于事件模块初始化,ngx_event_core_module对events块大部分指令的解析保存重要的配置信息,ngx_epoll_module实际上就是底层io模型的实现

1.解析events块

2.解析完events块,接着调用所有event module的init_conf回调函数初始化模块的配置结构,ngx_event_core_module和ngx_epoll_module会对配置结构中尚未初始化的一些属性赋默认值,比如默认使用io模型,也就是use指令的默认值

3.初始化事件模块init_module和init_process

init_module:
static ngx_int_t
ngx_event_module_init(ngx_cycle_t *cycle)
{
    .......
      cl = 128;

    size = cl            /* ngx_accept_mutex */
           + cl          /* ngx_connection_counter */
           + cl;         /* ngx_temp_number */

    shm.size = size;
    shm.name.len = sizeof("nginx_shared_zone");
    shm.name.data = (u_char *) "nginx_shared_zone";
    shm.log = cycle->log;

     if (ngx_shm_alloc(&shm) != NGX_OK) {
        return NGX_ERROR;
    }

    shared = shm.addr;

    ngx_accept_mutex_ptr = (ngx_atomic_t *) shared;
    ngx_accept_mutex.spin = (ngx_uint_t) -1;
    //  将accept锁放入共享内存,并将其初始化。
    if (ngx_shmtx_create(&ngx_accept_mutex, (ngx_shmtx_sh_t *) shared,
                         cycle->lock_file.data)
        != NGX_OK)
    {
        return NGX_ERROR;
    }

    ngx_connection_counter = (ngx_atomic_t *) (shared + 1 * cl);//接着放入连接计数器
}

ngx_event_process_init函数是在创建完worker进程后调用的

static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    //而对于ecf->accept_mutex字段的判断主要是提供用户便利,可以关闭该功能,因为既然均衡策略也有相应的代码逻辑,难保在某些情况下其本身的消耗也许会得不偿失;当然,该字段默认为1,在配置初始化函数ngx_event_core_init_conf()内,有这么一句:ngx_conf_init_value(ecf->accept_mutex, 1);
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {  
    ngx_use_accept_mutex = 1;  
    ngx_accept_mutex_held = 0;  
    ngx_accept_mutex_delay = ecf->accept_mutex_delay;  
  
    } else {  
        ngx_use_accept_mutex = 0;  
    }  
}


//初始化timer
if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {  
    return NGX_ERROR;  
}  



  for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) { //初始化epoll,在lunix下  ngx_epoll_init
            /* fatal */
            exit(2);
        }

        break;
    }



ngx_epoll_init    



static ngx_int_t
ngx_epoll_init(ngx_cycle_t *cycle, ngx_msec_t timer)
{
       ep = epoll_create(cycle->connection_n / 2);//  ep就是epoll的句柄,初值为-1,所以一启动nginx就是调用epoll_create创建句柄,
         if (nevents < epcf->events) {
                  if (event_list) {
                          ngx_free(event_list);
         }

         //初始化nevents和event_list,epcf->events是由ngx_epoll_module的epoll_events指令设置的。nevents和event_list是要传给epoll_wait的参数,nevents是要监听的事件的最大个数,event_list用于存放epoll返回的事件。

         event_list = ngx_alloc(sizeof(struct epoll_event) * epcf->events,ycle->log);

         nevents = epcf->events;

         ngx_event_actions = ngx_epoll_module_ctx.actions; //为抽象事件模型赋值

}

入口ngx_events_block

2.nginx内存管理

2.1.ngx的内存池

###2.2HTTP模块

##4.进程之间的通信

采用socketpair()函数创造一对未命名的unix域套接字来进行主从进程或子进程之间的双向通信

if (socketpair(AF_UNIX, SOCK_STREAM, 0, ngx_processes[s].channel) == -1)

就目前nginx代码来看,子进程并没有往父进程发送任何消息,子进程之间也没有相互通信的逻辑,也许是因为nginx有其它一些更好的进程通信方式,比如共享内存等,所以这种channel通信目前仅做为父进程往子进程发送消息使用,但由于有这个基础在这,如果未来要使用channel做这样的事情,的确是可以的。

4.1.共享内存

slab访问机制

5.解析配置文件

Reference

http://www.cnblogs.com/fll369/archive/2012/11/26/2788780.html