先来介绍几个地址结构.
struct sockaddr 其实相当于一个基类的地址结构,其他的结构都能够直接转到sockaddr.举个例子比如当sa_family为PF_INET时,sa_data就包含了端口号和ip地址(in_addr结构).
- ///Generic socket address: the "base" structure every protocol-specific
- ///address (e.g. sockaddr_in) is cast to when it crosses the socket API.
- ///For PF_INET, sa_data carries the port number and the in_addr address.
- struct sockaddr {
- sa_family_t sa_family; /* address family, AF_xxx */
- char sa_data[14]; /* 14 bytes of protocol address */
- };
接下来就是sockaddr_in,它表示了所有的ipv4的地址结构.可以看到它也就相当于sockaddr的一个子类.
- ///IPv4 socket address: the AF_INET "subclass" of struct sockaddr.
- struct sockaddr_in {
- sa_family_t sin_family; /* Address family */
- __be16 sin_port; /* Port number */
- struct in_addr sin_addr; /* Internet address */
- /* Pad to size of `struct sockaddr'. */
- unsigned char __pad[__SOCK_SIZE__ - sizeof(short int) -
- sizeof(unsigned short int) - sizeof(struct in_addr)];
- };
这里还有一个内核中比较新的地址结构sockaddr_storage,它可以容纳所有类型的套接口地址结构,比如ipv4,ipv6等.可以看到相比于sockaddr,它是强制对齐的.
- ///Protocol-independent storage large enough — and, unlike sockaddr,
- ///explicitly aligned — to hold any address family (IPv4, IPv6, ...).
- struct __kernel_sockaddr_storage {
- unsigned short ss_family; /* address family */
- ///Each protocol lays out its own address structure inside this area.
- char __data[_K_SS_MAXSIZE - sizeof(unsigned short)];
- /* space to achieve desired size, */
- /* _SS_MAXSIZE value minus size of ss_family */
- } __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
接下来看几个和bind相关的数据结构:
第一个是inet_hashinfo,它主要用来管理tcp的bind hash bucket(在tcp的初始化函数中会将tcp_hashinfo初始化.然后在tcp_prot中会将tcp_hashinfo赋给结构体h,然后相应的我们就可以通过sock中的sock_common域来存取这个值).后面我们会分析这个流程.
- ///Container for all of TCP's socket hash tables: established (ehash),
- ///bound ports (bhash) and listeners (listening_hash).
- struct inet_hashinfo {
- /* This is for sockets with full identity only. Sockets here will
- * always be without wildcards and will have the following invariant:
- *
- * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
- *
- * TIME_WAIT sockets use a separate chain (twchain).
- */
- ///This structure is analyzed below.
- struct inet_ehash_bucket *ehash;
- rwlock_t *ehash_locks;
- unsigned int ehash_size;
- unsigned int ehash_locks_mask;
- /* Ok, let's try this, I give up, we do need a local binding
- * TCP hash as well as the others for fast bind/connect.
- */
- ///All local ports currently in use. bhash is a hash table whose chains
- ///are lists of inet_bind_bucket entries (analyzed right below).
- struct inet_bind_hashbucket *bhash;
- unsigned int bhash_size;
- /* Note : 4 bytes padding on 64 bit arches */
- /* All sockets in TCP_LISTEN state will be in here. This is the only
- * table where wildcard'd TCP sockets can exist. Hash function here
- * is just local port number.
- */
- ///listening_hash holds every socket in the TCP_LISTEN state.
- struct hlist_head listening_hash[INET_LHTABLE_SIZE];
- /* All the above members are written once at bootup and
- * never written again _or_ are predominantly read-access.
- *
- * Now align to a new cache line as all the following members
- * are often dirty.
- */
- rwlock_t lhash_lock ____cacheline_aligned;
- atomic_t lhash_users;
- wait_queue_head_t lhash_wait;
- struct kmem_cache *bind_bucket_cachep;
- };
struct inet_ehash_bucket管理所有tcp状态处于TCP_ESTABLISHED和TCP_CLOSE之间的socket.这里要注意,twchain表示处于TIME_WAIT状态的socket.
- ///One ehash bucket: chain holds sockets whose state lies between
- ///TCP_ESTABLISHED and TCP_CLOSE, while twchain holds the TIME_WAIT
- ///sockets separately.
- struct inet_ehash_bucket {
- struct hlist_head chain;
- struct hlist_head twchain;
- };
inet_bind_bucket结构就是每个使用的端口的信息,最终会把它链接到bhash链表中.
- ///Describes one in-use local port; instances are chained into the bhash
- ///table above.
- struct inet_bind_bucket {
- struct net *ib_net;
- ///The port number itself.
- unsigned short port;
- ///Whether this port may be shared by further sockets (reuse fast path).
- signed short fastreuse;
- ///Links this bucket into its bhash chain (i.e. to the next port entry).
- struct hlist_node node;
- ///List of the sockets bound to (owning) this port.
- struct hlist_head owners;
- };
最后一个结构是tcp_hashinfo,它在tcp_init中被初始化,而tcp_init是在inet_init中被调用的.然后tcp_hashinfo会被赋值给tcp_prot,并可以通过sock的sk_prot域来访问.
- ///TCP's global inet_hashinfo instance. It is initialized in tcp_init()
- ///(which runs from inet_init()) and is reachable from a sock through
- ///sk->sk_prot->h.hashinfo (see inet_csk_get_port below).
- struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
- .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
- .lhash_users = ATOMIC_INIT(0),
- .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
- };
然后来看bind的实现,bind对应的系统调用是sys_bind:
- ///System-call entry for bind(2): copy the user-space address into the
- ///kernel, run the LSM security hook, then dispatch to the protocol's
- ///bind operation (inet_bind for AF_INET).
- asmlinkage long sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
- {
- struct socket *sock;
- struct sockaddr_storage address;
- int err, fput_needed;
- ///Look up the socket behind fd; on failure err is already set.
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
- if (sock) {
- ///Copy the address from user space into the kernel buffer.
- err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
- if (err >= 0) {
- err = security_socket_bind(sock,
- (struct sockaddr *)&address,
- addrlen);
- if (!err)
- ///Protocol-level bind; this is inet_bind for AF_INET.
- err = sock->ops->bind(sock,
- (struct sockaddr *)
- &address, addrlen);
- }
- ///Drop the file reference taken in sockfd_lookup_light, if one was taken.
- fput_light(sock->file, fput_needed);
- }
- return err;
- }
sockfd_lookup_light主要是查找fd对应的socket
- ///Translate a file descriptor into its struct socket. *fput_needed tells
- ///the caller whether a reference was taken that fput_light() must drop.
- static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
- {
- struct file *file;
- struct socket *sock;
- *err = -EBADF;
- ///Resolve fd to its struct file.
- file = fget_light(fd, fput_needed);
- if (file) {
- ///sock_map_fd() (via sock_attach_fd()) stored the socket in the file's
- ///private data, so this simply reads it back.
- sock = sock_from_file(file, err);
- if (sock)
- return sock;
- fput_light(file, *fput_needed);
- }
- return NULL;
- }
然后来看inet_bind的实现.
- ///AF_INET bind: validate the requested address and port, claim the port
- ///via sk_prot->get_port, and record the bound address in the inet_sock.
- int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
- {
- ///The address to bind, plus the owning sock and its inet_sock.
- struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
- struct sock *sk = sock->sk;
- struct inet_sock *inet = inet_sk(sk);
- unsigned short snum;
- int chk_addr_ret;
- int err;
- /* If the socket has its own bind function then use it. (RAW) */
- if (sk->sk_prot->bind) {
- err = sk->sk_prot->bind(sk, uaddr, addr_len);
- goto out;
- }
- err = -EINVAL;
- if (addr_len < sizeof(struct sockaddr_in))
- goto out;
- ///Classify the address (local, multicast, broadcast, ...).
- chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
- err = -EADDRNOTAVAIL;
- ///Unless nonlocal binds are allowed (sysctl or IP_FREEBIND), reject an
- ///address that is neither wildcard, local, multicast nor broadcast.
- if (!sysctl_ip_nonlocal_bind &&
- !inet->freebind &&
- addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
- chk_addr_ret != RTN_LOCAL &&
- chk_addr_ret != RTN_MULTICAST &&
- chk_addr_ret != RTN_BROADCAST)
- goto out;
- ///The requested port, in host byte order.
- snum = ntohs(addr->sin_port);
- err = -EACCES;
- ///Ports below PROT_SOCK (1024) require CAP_NET_BIND_SERVICE; capable()
- ///performs the privilege check.
- if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
- goto out;
- /* We keep a pair of addresses. rcv_saddr is the one
- * used by hash lookups, and saddr is used for transmit.
- *
- * In the BSD API these are the same except where it
- * would be illegal to use them (multicast/broadcast) in
- * which case the sending device address is used.
- */
- lock_sock(sk);
- /* Check these errors (active socket, double bind). */
- err = -EINVAL;
- ///The socket must still be in TCP_CLOSE — any other state means it is
- ///already active/bound. inet->num is non-zero here only for raw sockets.
- if (sk->sk_state != TCP_CLOSE || inet->num)
- goto out_release_sock;
- ///rcv_saddr is the source address used by hash lookups; saddr is the
- ///source address placed in the IP header on transmit.
- inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
- ///For multicast/broadcast binds the sending device's address is used.
- if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
- inet->saddr = 0; /* Use device */
- ///get_port checks whether the requested port may be used (and allocates
- ///one when snum is 0). For TCP it is inet_csk_get_port, analyzed below.
- if (sk->sk_prot->get_port(sk, snum)) {
- inet->saddr = inet->rcv_saddr = 0;
- err = -EADDRINUSE;
- goto out_release_sock;
- }
- ///These "locks" are flag bits recording that the user explicitly bound
- ///the address/port, so later code must not silently change them.
- if (inet->rcv_saddr)
- sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
- if (snum)
- sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
- ///Store the source port (network byte order).
- inet->sport = htons(inet->num);
- ///Destination address and port are unknown until connect.
- inet->daddr = 0;
- inet->dport = 0;
- sk_dst_reset(sk);
- err = 0;
- out_release_sock:
- release_sock(sk);
- out:
- return err;
- }
这里我先来介绍下inet_csk_get_port的流程.
当绑定的port为0时,这时也就是说需要kernel来分配一个新的port.
1 首先得到系统的port范围.
2 随机分配一个port.
3 从bhash中得到当前随机分配的端口的链表(也就是inet_bind_bucket链表).
4 遍历这个链表(链表为空的话,也说明这个port没有被使用),如果这个端口已经被使用,则将端口号加一,继续循环,直到找到当前没有被使用的port,也就是没有在bhash中存在的port.
5 新建一个inet_bind_bucket,并插入到bhash中.
当指定port时.
1 从bhash中根据hash值(port计算的)取得当前指定端口对应的inet_bind_bucket结构.
2 如果bhash中存在,则说明,这个端口已经在使用,因此需要判断这个端口是否允许被reuse.
3 如果不存在,则步骤和上面的第5步一样.
- ///Validate — or allocate, when snum == 0 — a local port for sk and
- ///attach sk to the port's inet_bind_bucket. Returns 0 on success,
- ///1 on failure.
- int inet_csk_get_port(struct sock *sk, unsigned short snum)
- {
- struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
- struct inet_bind_hashbucket *head;
- struct hlist_node *node;
- struct inet_bind_bucket *tb;
- int ret;
- struct net *net = sock_net(sk);
- local_bh_disable();
- if (!snum) {
- ///Port 0: the kernel must pick an unused ephemeral port itself.
- int remaining, rover, low, high;
- ///Fetch the configured local port range.
- inet_get_local_port_range(&low, &high);
- remaining = (high - low) + 1;
- rover = net_random() % remaining + low;
- ///Probe ports, starting from the random rover, until a free one is found.
- do {
- ///Hash the candidate port to its bhash bucket.
- head = &hashinfo->bhash[inet_bhashfn(net, rover,
- hashinfo->bhash_size)];
- spin_lock(&head->lock);
- inet_bind_bucket_for_each(tb, node, &head->chain)
- if (tb->ib_net == net && tb->port == rover)
- ///Port already in use: advance to the next candidate.
- goto next;
- break;
- next:
- spin_unlock(&head->lock);
- ///Wrap around to the bottom of the range when we run past the top
- ///(the start point was random, so lower ports may still be untried).
- if (++rover > high)
- rover = low;
- } while (--remaining > 0);
- /* Exhausted local port range during search? It is not
- * possible for us to be holding one of the bind hash
- * locks if this test triggers, because if 'remaining'
- * drops to zero, we broke out of the do/while loop at
- * the top level, not from the 'break;' statement.
- */
- ret = 1;
- if (remaining <= 0)
- goto fail;
- ///This is the port we will hand out.
- snum = rover;
- } else {
- ///A specific port was requested: a single bhash lookup suffices.
- head = &hashinfo->bhash[inet_bhashfn(net, snum,
- hashinfo->bhash_size)];
- spin_lock(&head->lock);
- inet_bind_bucket_for_each(tb, node, &head->chain)
- if (tb->ib_net == net && tb->port == snum)
- goto tb_found;
- }
- tb = NULL;
- goto tb_not_found;
- tb_found:
- ///The port already has a bucket; if sockets currently own it we must
- ///decide whether sharing is permitted.
- if (!hlist_empty(&tb->owners)) {
- ///fastreuse > 0 means every current owner allows the port to be shared;
- ///this socket may join only if it also sets sk_reuse and is NOT
- ///(about to be) listening.
- if (tb->fastreuse > 0 &&
- sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
- goto success;
- } else {
- ret = 1;
- ///Otherwise ask bind_conflict (inet_csk_bind_conflict): owners bound
- ///to different local addresses may still coexist on this port.
- if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb))
- goto fail_unlock;
- }
- }
- tb_not_found:
- ret = 1;
- ///Allocate a fresh inet_bind_bucket for this port and chain it into bhash.
- if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
- net, head, snum)) == NULL)
- goto fail_unlock;
- if (hlist_empty(&tb->owners)) {
- ///The first owner decides the bucket's initial fastreuse setting.
- if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
- tb->fastreuse = 1;
- else
- tb->fastreuse = 0;
- } else if (tb->fastreuse &&
- (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
- tb->fastreuse = 0;
- success:
- ///Link this socket into the port's owners list.
- if (!inet_csk(sk)->icsk_bind_hash)
- inet_bind_hash(sk, tb, snum);
- WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
- ret = 0;
- fail_unlock:
- spin_unlock(&head->lock);
- fail:
- local_bh_enable();
- return ret;
- }
在看listen的代码之前.我们也先来看相关的数据结构:
其中inet_connection_sock我们先前已经介绍过了,它包含了一个icsk_accept_queue的域,这个域是一个request_sock_queue类型.我们就先来看这个结构:
request_sock_queue也就表示一个request_sock队列.这里我们知道,tcp中分为半连接队列(处于SYN_RECVD状态)和已完成连接队列(处于established状态).这两个一个是刚接到syn,等待三次握手完成,一个是已经完成三次握手,等待accept来读取.
这里每个syn分节到来都会新建一个request_sock结构,并将它加入到listen_sock的request_sock hash表中.然后3次握手完毕后,将它放入到request_sock_queue的rskq_accept_head和rskq_accept_tail队列中.这样当accept的时候就直接从这个队列中读取了.
- ///Connection-request queues of a listening socket: the accept queue of
- ///fully established connections, plus (via listen_opt) the half-open
- ///SYN_RECV request table.
- struct request_sock_queue {
- ///Head and tail of the completed-connection (accept) queue.
- struct request_sock *rskq_accept_head;
- struct request_sock *rskq_accept_tail;
- rwlock_t syn_wait_lock;
- u8 rskq_defer_accept;
- /* 3 bytes hole, try to pack */
- ///The listen_sock holding the half-open request hash table.
- struct listen_sock *listen_opt;
- };
listen_sock 表示一个处于listening状态的socket.
- ///Per-listener state: the half-open (SYN_RECV) request hash table.
- struct listen_sock {
- ///log_2 of maximal queued SYNs/REQUESTs, i.e. the half-open queue limit
- ///expressed as a power of two.
- u8 max_qlen_log;
- /* 3 bytes hole, try to use */
- ///Current length of the half-open queue.
- int qlen;
- ///Count of "young" half-open requests: it drops by one when a request's
- ///SYN/ACK is retransmitted for the first time.
- int qlen_young;
- int clock_hand;
- u32 hash_rnd;
- ///Capacity (number of slots) of the syn_table below — the maximum size
- ///of the SYN backlog.
- u32 nr_table_entries;
- ///The half-open request hash table itself, allocated inline.
- struct request_sock *syn_table[0];
- };
最后来看下request_sock,它保存了tcp双方传输所必需的一些域,比如窗口大小,对端速率,对端数据包序列号等等这些值.
- ///One pending connection request: holds what is needed to complete the
- ///handshake and later build the full socket (MSS, window sizes, ...).
- struct request_sock {
- struct request_sock *dl_next; /* Must be first member! */
- ///The MSS value for this connection.
- u16 mss;
- u8 retrans;
- u8 cookie_ts; /* syncookie: encode tcpopts in timestamp */
- /* The following two fields can be easily recomputed I think -AK */
- u32 window_clamp; /* window clamp at creation time */
- ///Receive window offered initially.
- u32 rcv_wnd; /* rcv_wnd offered first time */
- u32 ts_recent;
- unsigned long expires;
- ///Operations set used e.g. to send the SYN/ACK for this request.
- const struct request_sock_ops *rsk_ops;
- struct sock *sk;
- u32 secid;
- u32 peer_secid;
- };
listen的对应的系统调用是sys_listen,它首先通过sockfd_lookup_light查找到相应的socket,然后调用inet_listen,大体流程和bind差不多,只不过中间调用的是inet_listen罢了.
这里还有一个概念那就是backlog,在linux中,backlog的大小指的是已完成连接队列的大小.而不是和半连接队列之和.而半开连接的大小一般是和backlog差不多大小.
而半开连接队列的最大长度是根据backlog计算的,我们后面会介绍这个.
因此我们直接来看inet_listen的实现,这个函数主要是进行一些合法性判断,然后调用inet_csk_listen_start来对相关域进行处理:
- ///listen(2) for AF_INET: validate socket state, perform the transition
- ///via inet_csk_listen_start when needed, then record the backlog.
- int inet_listen(struct socket *sock, int backlog)
- {
- struct sock *sk = sock->sk;
- unsigned char old_state;
- int err;
- lock_sock(sk);
- err = -EINVAL;
- ///Only unconnected stream sockets may listen.
- if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
- goto out;
- old_state = sk->sk_state;
- ///The TCP state must be CLOSE or LISTEN.
- if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
- goto out;
- /* Really, if the socket is already in listen state
- * we can only allow the backlog to be adjusted.
- */
- ///Not listening yet: perform the real transition.
- if (old_state != TCP_LISTEN) {
- err = inet_csk_listen_start(sk, backlog);
- if (err)
- goto out;
- }
- ///sk_max_ack_backlog caps the completed-connection (accept) queue.
- sk->sk_max_ack_backlog = backlog;
- err = 0;
- out:
- release_sock(sk);
- return err;
- }
然后来看inet_csk_listen_start的实现.
它的主要工作是新分配一个listen socket,将它加入到inet_connection_sock的icsk_accept_queue域的listen_opt中.然后对当前使用端口进行判断.最终返回:
- ///Switch sk into the listening state: allocate the request (SYN) queue
- ///into icsk_accept_queue, re-validate the local port, and hash the
- ///socket into listening_hash.
- int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
- {
- struct inet_sock *inet = inet_sk(sk);
- struct inet_connection_sock *icsk = inet_csk(sk);
- ///Allocate the listen_sock (half-open request table) for this listener.
- int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
- if (rc != 0)
- return rc;
- ///Start with empty accept-queue accounting.
- sk->sk_max_ack_backlog = 0;
- sk->sk_ack_backlog = 0;
- inet_csk_delack_init(sk);
- /* There is race window here: we announce ourselves listening,
- * but this transition is still not validated by get_port().
- * It is OK, because this socket enters to hash table only
- * after validation is complete.
- */
- ///Announce the listening state.
- sk->sk_state = TCP_LISTEN;
- ///Re-check the port via get_port (analyzed above): another thread may
- ///have changed the port's status between bind() and listen().
- if (!sk->sk_prot->get_port(sk, inet->num)) {
- //Port still usable: record it in sport and add the socket to the
- //listening_hash list of inet_hashinfo (analyzed above).
- inet->sport = htons(inet->num);
- sk_dst_reset(sk);
- ///For TCP the hash callback is __inet_hash.
- sk->sk_prot->hash(sk);
- return 0;
- }
- ///Port no longer usable: roll back and report the address as in use.
- sk->sk_state = TCP_CLOSE;
- __reqsk_queue_destroy(&icsk->icsk_accept_queue);
- return -EADDRINUSE;
- }
最后我们来看下reqsk_queue_alloc的实现:
- ///Upper bound on the half-open (SYN backlog) queue size.
- int sysctl_max_syn_backlog = 256;
- ///Allocate and initialize the listen_sock (SYN table) for a listener;
- ///nr_table_entries is the backlog value passed to listen(2).
- int reqsk_queue_alloc(struct request_sock_queue *queue,
- unsigned int nr_table_entries)
- {
- size_t lopt_size = sizeof(struct listen_sock);
- struct listen_sock *lopt;
- ///Clamp the requested backlog to sysctl_max_syn_backlog...
- nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
- ///...but never below 8 entries...
- nr_table_entries = max_t(u32, nr_table_entries, 8);
- ///...then round up to the next power of two.
- nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
- ///Total allocation: the header plus one request_sock pointer per slot.
- lopt_size += nr_table_entries * sizeof(struct request_sock *);
- if (lopt_size > PAGE_SIZE)
- lopt = __vmalloc(lopt_size,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
- PAGE_KERNEL);
- else
- lopt = kzalloc(lopt_size, GFP_KERNEL);
- if (lopt == NULL)
- return -ENOMEM;
- ///max_qlen_log = max(3, log2(nr_table_entries)).
- for (lopt->max_qlen_log = 3;
- (1 << lopt->max_qlen_log) < nr_table_entries;
- lopt->max_qlen_log++);
- get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
- rwlock_init(&queue->syn_wait_lock);
- queue->rskq_accept_head = NULL;
- ///Record the table capacity.
- lopt->nr_table_entries = nr_table_entries;
- write_lock_bh(&queue->syn_wait_lock);
- ///Publish the listen_sock under syn_wait_lock.
- queue->listen_opt = lopt;
- write_unlock_bh(&queue->syn_wait_lock);
- return 0;
- }