libuv Study Notes (12)

uv_tcp_t: Data Structures and Related Functions (1)

Data Structures

typedef struct uv_tcp_s uv_tcp_t;
struct uv_tcp_s {
  UV_HANDLE_FIELDS//members of uv_handle_t
  UV_STREAM_FIELDS//members of uv_stream_t
  //UV_TCP_PRIVATE_FIELDS expands to:
  SOCKET socket;
  int delayed_error;
  union {
    struct {
     uv_tcp_accept_t* accept_reqs;//list of accept requests
     unsigned int processed_accepts;
     uv_tcp_accept_t* pending_accepts;//accept requests waiting to be processed
     LPFN_ACCEPTEX func_acceptex;//function pointer to AcceptEx
    } serv;
    struct {
     uv_buf_t read_buffer; //buffer for incoming data
     LPFN_CONNECTEX func_connectex;//function pointer to ConnectEx
    } conn;
  } tcp;
};

The related request type

typedef struct uv_tcp_accept_s {
  UV_REQ_FIELDS//members of uv_req_t
  SOCKET accept_socket;
  char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
  HANDLE event_handle;
  HANDLE wait_handle;
  struct uv_tcp_accept_s* next_pending;
} uv_tcp_accept_t;

Related Functions

Initialization. Exported function, declared in uv.h and defined in tcp.c.

int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
  return uv_tcp_init_ex(loop, handle, AF_UNSPEC);//AF_UNSPEC: no socket is created yet
}

Extended initialization. Exported function, declared in uv.h and defined in tcp.c.

int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
  int domain;
  //only the low 8 bits of flags are used
  domain = flags & 0xFF;
  //the domain must be one of the following three; AF_UNSPEC does not create a socket
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;
  if (flags & ~0xFF)//any bit set above the low byte is an error
    return UV_EINVAL;
  //initialize the stream part and add the handle to the loop's handle list
  uv_stream_init(loop, (uv_stream_t*) handle, UV_TCP);
  handle->tcp.serv.accept_reqs = NULL;
  handle->tcp.serv.pending_accepts = NULL;
  handle->socket = INVALID_SOCKET;
  handle->reqs_pending = 0;
  handle->tcp.serv.func_acceptex = NULL;
  handle->tcp.conn.func_connectex = NULL;
  handle->tcp.serv.processed_accepts = 0;
  handle->delayed_error = 0;

  //if anything below fails, the handle has to be removed from the loop again
  if (domain != AF_UNSPEC) {
    SOCKET sock;
    DWORD err;
    sock = socket(domain, SOCK_STREAM, 0);//create a new socket
    if (sock == INVALID_SOCKET) {//failed
      err = WSAGetLastError();
      QUEUE_REMOVE(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }
    //associate the new socket with the loop's IOCP port
    err = uv_tcp_set_socket(handle->loop, handle, sock, domain, 0);
    if (err) {//failed
      closesocket(sock);
      QUEUE_REMOVE(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }

  }
  return 0;
}
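
As a quick illustration of the two entry points above, here is a minimal usage sketch (this is my own example, not part of the post or the libuv source; the handle names are made up and error handling is reduced to printing a message; uv_tcp_init_ex requires a reasonably recent libuv):

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_tcp_t lazy_handle;   //AF_UNSPEC: the socket is created later (on bind/connect)
  uv_tcp_t eager_handle;  //the socket is created right away for the given family
  int err;

  err = uv_tcp_init(loop, &lazy_handle);
  if (err) fprintf(stderr, "uv_tcp_init: %s\n", uv_strerror(err));

  err = uv_tcp_init_ex(loop, &eager_handle, AF_INET);
  if (err) fprintf(stderr, "uv_tcp_init_ex: %s\n", uv_strerror(err));

  uv_close((uv_handle_t*) &lazy_handle, NULL);
  uv_close((uv_handle_t*) &eager_handle, NULL);
  return uv_run(loop, UV_RUN_DEFAULT);
}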

Binding a TCP socket to the IOCP port

static int uv_tcp_set_socket(uv_loop_t* loop,
                             uv_tcp_t* handle,
                             SOCKET socket,
                             int family,
                             int imported) {
  DWORD yes = 1;
  int non_ifs_lsp;
  int err;
  if (handle->socket != INVALID_SOCKET)
    return UV_EBUSY;
  //put the socket into non-blocking mode
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
    return WSAGetLastError();
  }
  //make the socket handle non-inheritable
  if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0))
    return GetLastError();
  //associate the socket with the IOCP port, using the socket itself as the completion key
  if (CreateIoCompletionPort((HANDLE)socket,
                             loop->iocp,
                             (ULONG_PTR)socket,
                             0) == NULL) {
    //imported is 1 when an existing socket is opened via uv_tcp_open
    if (imported) {
      handle->flags |= UV_HANDLE_EMULATE_IOCP;
    } else {
      return GetLastError();
    }
  }

  if (family == AF_INET6) {
    //uv_tcp_non_ifs_lsp_ipv6 == 1 means a non-IFS LSP is installed for IPv6, i.e. the socket handle is not a real OS handle
    non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv6;
  } else {
    //same check for IPv4
    non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv4;
  }

  if (pSetFileCompletionNotificationModes &&
      !(handle->flags & UV_HANDLE_EMULATE_IOCP) && !non_ifs_lsp) {
    if (pSetFileCompletionNotificationModes((HANDLE) socket,
        FILE_SKIP_SET_EVENT_ON_HANDLE |
        FILE_SKIP_COMPLETION_PORT_ON_SUCCESS))//if an operation completes immediately, no completion packet is posted to the IOCP
    {
      handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
    } else if (GetLastError() != ERROR_INVALID_FUNCTION) {
      return GetLastError();
    }
  }
  //disable the Nagle algorithm
  if (handle->flags & UV_HANDLE_TCP_NODELAY) {
    err = uv__tcp_nodelay(handle, socket, 1);
    if (err)
      return err;
  }
  //enable TCP keep-alive
  if (handle->flags & UV_HANDLE_TCP_KEEPALIVE) {
    err = uv__tcp_keepalive(handle, socket, 1, 60);
    if (err)
      return err;
  }
  handle->socket = socket;

  if (family == AF_INET6) {
    handle->flags |= UV_HANDLE_IPV6;
  } else {
    assert(!(handle->flags & UV_HANDLE_IPV6));
  }

  return 0;
}

Wrapping an existing socket as a uv_tcp_t. Exported function, declared in uv.h and defined in tcp.c.

int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
  WSAPROTOCOL_INFOW protocol_info;
  int opt_len;
  int err;

  //query the protocol info, which includes the socket's address family
  opt_len = (int) sizeof protocol_info;
  if (getsockopt(sock,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &opt_len) == SOCKET_ERROR) {
    return uv_translate_sys_error(GetLastError());
  }
  //associate the socket with the loop's IOCP port
  //and configure it according to the handle's flags
  err = uv_tcp_set_socket(handle->loop,
                          handle,
                          sock,
                          protocol_info.iAddressFamily,
                          1);
  if (err) {
    return uv_translate_sys_error(err);
  }

  return 0;
}
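
A minimal sketch of how uv_tcp_open might be used to hand an existing native socket to libuv (my own illustration, not from the post; wrap_native_socket is a made-up helper and error handling is abbreviated):

#include <uv.h>

//uv_os_sock_t is SOCKET on Windows and int on Unix; the loop is assumed to be
//initialized already, which on Windows also takes care of WSAStartup
static int wrap_native_socket(uv_loop_t* loop, uv_tcp_t* handle) {
  uv_os_sock_t sock = socket(AF_INET, SOCK_STREAM, 0);
  int err;

  if (sock == (uv_os_sock_t) -1)
    return UV_UNKNOWN;

  err = uv_tcp_init(loop, handle);  //AF_UNSPEC: no socket is created here
  if (err)
    return err;

  //hand the native socket over to libuv; on Windows uv_tcp_set_socket then
  //associates it with the loop's IOCP port (imported == 1)
  return uv_tcp_open(handle, sock);
}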

Getting the local address. Exported function, declared in uv.h and defined in tcp.c.

int uv_tcp_getsockname(const uv_tcp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {
  int result;
  if (handle->socket == INVALID_SOCKET) {
    return UV_EINVAL;
  }
  if (handle->delayed_error) {//a previous operation (such as bind) failed with a deferred error
    return uv_translate_sys_error(handle->delayed_error);
  }
  result = getsockname(handle->socket, name, namelen);//call the Winsock API
  if (result != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }
  return 0;
}

Getting the peer address. Exported function, declared in uv.h and defined in tcp.c.

int uv_tcp_getpeername(const uv_tcp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {
  int result;

  if (handle->socket == INVALID_SOCKET) {
    return UV_EINVAL;
  }

  if (handle->delayed_error) {
    return uv_translate_sys_error(handle->delayed_error);
  }

  result = getpeername(handle->socket, name, namelen);//call the Winsock API
  if (result != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }
  return 0;
}
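
Both getters are typically called with a struct sockaddr_storage, which is large enough for either address family. A small sketch (my own example; it assumes the handle is already connected and, for simplicity, IPv4):

#include <stdio.h>
#include <uv.h>

static void print_endpoints(const uv_tcp_t* handle) {
  struct sockaddr_storage addr;
  char ip[64];
  int len;

  len = (int) sizeof(addr);
  if (uv_tcp_getsockname(handle, (struct sockaddr*) &addr, &len) == 0 &&
      addr.ss_family == AF_INET) {
    uv_ip4_name((const struct sockaddr_in*) &addr, ip, sizeof(ip));
    printf("local address: %s\n", ip);
  }

  len = (int) sizeof(addr);
  if (uv_tcp_getpeername(handle, (struct sockaddr*) &addr, &len) == 0 &&
      addr.ss_family == AF_INET) {
    uv_ip4_name((const struct sockaddr_in*) &addr, ip, sizeof(ip));
    printf("peer address:  %s\n", ip);
  }
}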

Establishing a connection. Exported function, declared in uv.h and defined in tcp.c.

int uv_tcp_connect(uv_connect_t* req,
                   uv_tcp_t* handle,
                   const struct sockaddr* addr,
                   uv_connect_cb cb) {
  unsigned int addrlen;
  if (handle->type != UV_TCP)
    return UV_EINVAL;
  if (addr->sa_family == AF_INET)
    addrlen = sizeof(struct sockaddr_in);
  else if (addr->sa_family == AF_INET6)
    addrlen = sizeof(struct sockaddr_in6);
  else
    return UV_EINVAL;
  return uv__tcp_connect(req, handle, addr, addrlen, cb);//delegate to the internal implementation
}
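
From the caller's point of view this is the usual connect pattern. A minimal sketch (my own example; on_connect and the 127.0.0.1:7000 address are made up for illustration):

#include <stdio.h>
#include <uv.h>

static void on_connect(uv_connect_t* req, int status) {
  if (status < 0)
    fprintf(stderr, "connect failed: %s\n", uv_strerror(status));
  else
    printf("connected\n");
  uv_close((uv_handle_t*) req->handle, NULL);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_tcp_t client;
  uv_connect_t connect_req;
  struct sockaddr_in dest;

  uv_tcp_init(loop, &client);
  uv_ip4_addr("127.0.0.1", 7000, &dest);
  //addr->sa_family (AF_INET here) determines addrlen inside uv_tcp_connect
  uv_tcp_connect(&connect_req, &client, (const struct sockaddr*) &dest, on_connect);
  return uv_run(loop, UV_RUN_DEFAULT);
}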

Internal handling of connect

int uv__tcp_connect(uv_connect_t* req,
                    uv_tcp_t* handle,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_connect_cb cb) {
  int err;
  err = uv_tcp_try_connect(req, handle, addr, addrlen, cb);
  if (err)
    return uv_translate_sys_error(err);
  return 0;
}
static int uv_tcp_try_connect(uv_connect_t* req,
                              uv_tcp_t* handle,
                              const struct sockaddr* addr,
                              unsigned int addrlen,
                              uv_connect_cb cb) {
  uv_loop_t* loop = handle->loop;
  const struct sockaddr* bind_addr;
  BOOL success;
  DWORD bytes;
  int err;
  if (handle->delayed_error) {
    return handle->delayed_error;
  }
  //no local address is bound yet, so bind to the default (any) address first
  if (!(handle->flags & UV_HANDLE_BOUND)) {
    if (addrlen == sizeof(uv_addr_ip4_any_)) {
      bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
    } else if (addrlen == sizeof(uv_addr_ip6_any_)) {
      bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
    } else {
      abort();
    }
    err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
    if (err)
      return err;
    if (handle->delayed_error)
      return handle->delayed_error;
  }

  if (!handle->tcp.conn.func_connectex) { 
    //obtain the ConnectEx function pointer for this particular socket
    if (!uv_get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex))   
    {
      return WSAEAFNOSUPPORT;
    }
  }
  //initialize the connect request
  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_CONNECT;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
  //call ConnectEx to connect asynchronously; success or failure is reported to the IOCP port
  success = handle->tcp.conn.func_connectex(handle->socket,
                                           addr,
                                           addrlen,
                                           NULL,
                                           0,
                                           &bytes,
                                           &req->u.io.overlapped);

  if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
    //the call completed immediately and no IOCP packet will be posted, so insert the request into the loop's pending-request queue directly
    handle->reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
    //handled through IOCP; the req is added to the loop's pending-request queue when the completion packet arrives
    handle->reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    return WSAGetLastError();
  }
  return 0;
}

For a connect request that goes through IOCP, uv_run's polling picks up the completion notification, recovers the corresponding req from the OVERLAPPED structure and adds it to the loop's pending-request queue; on the next iteration the loop calls uv_process_reqs to dispatch it:

case UV_CONNECT:
        DELEGATE_STREAM_REQ(loop, (uv_connect_t*) req, connect, handle);
        break;

which ultimately calls uv_process_tcp_connect_req to process the TCP connect request:

void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_connect_t* req) {
  int err;
  assert(handle->type == UV_TCP);
  //decrement the handle's active count; if it reaches zero the handle is stopped
  //(the loop's active-handle count decreases and the handle is no longer Active),
  //and remove the req from the loop's active_reqs list.
  //The connect request is fully processed at this point, so the corresponding
  //uv_tcp_t should fall back to the stopped state.
  UNREGISTER_HANDLE_REQ(loop, handle, req);
  err = 0;
  if (REQ_SUCCESS(req)) {//the connection succeeded, update the socket state
    if (setsockopt(handle->socket,
                    SOL_SOCKET,
                    SO_UPDATE_CONNECT_CONTEXT,
                    NULL,
                    0) == 0) {
      uv_connection_init((uv_stream_t*)handle);
      handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
      loop->active_tcp_streams++;//one more active TCP stream
    } else {
      err = WSAGetLastError();
    }
  } else {
    err = GET_REQ_SOCK_ERROR(req);
  }
  req->cb(req, uv_translate_sys_error(err));//invoke the user callback
  //DECREASE_PENDING_REQ_COUNT(handle); expands to:
  do {
      assert(handle->reqs_pending > 0);
      handle->reqs_pending--;//one fewer pending request
      if (handle->flags & UV__HANDLE_CLOSING &&
        handle->reqs_pending == 0) {//the handle is closing and this was its last pending request
        uv_want_endgame(loop, (uv_handle_t*)handle);
      }
  } while (0);
}

Reading data (uv_read_start). Exported function, declared in uv.h and defined in stream.c.

//for a stream of type uv_tcp_t, uv_read_start dispatches to the following handler
switch (handle->type) {
    case UV_TCP:
      err = uv_tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb);
      break;

uv_tcp_read_start: start reading on a TCP handle

int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb) {
  uv_loop_t* loop = handle->loop;
  handle->flags |= UV_HANDLE_READING;//mark the handle as reading
  handle->read_cb = read_cb;
  handle->alloc_cb = alloc_cb;
  //activecnt++; if it was 0 before, the handle is started
  INCREASE_ACTIVE_COUNT(loop, handle);
  //only queue a new read request if one is not already pending (reading may have been stopped and restarted)
  if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        !handle->read_req.event_handle) {
      //when IOCP is emulated, create the event used for completion notification
      handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
      if (!handle->read_req.event_handle) {
        uv_fatal_error(GetLastError(), "CreateEvent");
      }
    }
    //queue the read request
    uv_tcp_queue_read(loop, handle);
  }
  return 0;
}
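
For reference, the user-facing side of this is uv_read_start with an alloc callback and a read callback. A minimal sketch (my own example; alloc_buffer and on_read are illustrative names, and `client` is assumed to be an already connected uv_tcp_t):

#include <stdlib.h>
#include <stdio.h>
#include <uv.h>

static void alloc_buffer(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
  *buf = uv_buf_init((char*) malloc(suggested_size), (unsigned int) suggested_size);
}

static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
  if (nread > 0) {
    fwrite(buf->base, 1, (size_t) nread, stdout);  //consume the data
  } else if (nread < 0) {
    //UV_EOF or a real error: close the handle (which also stops reading)
    uv_close((uv_handle_t*) stream, NULL);
  }
  //nread == 0 is not an error, it simply means "no data right now"
  if (buf->base)
    free(buf->base);
}

static int start_reading(uv_tcp_t* client) {
  //libuv keeps re-issuing read requests internally until uv_read_stop or uv_close
  return uv_read_start((uv_stream_t*) client, alloc_buffer, on_read);
}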

Queueing a read request

static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
  uv_read_t* req;
  uv_buf_t buf;
  int result;
  DWORD bytes, flags;
  assert(handle->flags & UV_HANDLE_READING);
  assert(!(handle->flags & UV_HANDLE_READ_PENDING));//a new read must not be issued while one is still pending
  req = &handle->read_req;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  //pre-allocate a read buffer; uv_active_tcp_streams_threshold is currently 0, so this branch is normally skipped
  if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
    handle->flags &= ~UV_HANDLE_ZERO_READ;
    handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer);
    if (handle->tcp.conn.read_buffer.len == 0) {
      handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer);
      return;
    }
    assert(handle->tcp.conn.read_buffer.base != NULL);
    buf = handle->tcp.conn.read_buffer;
  } 
  else {//no pre-allocated buffer: issue a zero-byte read
    handle->flags |= UV_HANDLE_ZERO_READ;
    buf.base = (char*) &uv_zero_;
    buf.len = 0;
  }
  //initialize the OVERLAPPED structure
  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {//the socket could not be bound to the IOCP port (e.g. a non-native socket), so IOCP is emulated
    assert(req->event_handle);
    req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
  }
  flags = 0;
  //receive data asynchronously
  result = WSARecv(handle->socket,
                   (WSABUF*)&buf,
                   1,
                   &bytes,
                   &flags,
                   &req->u.io.overlapped,
                   NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    //the request completed without an IOCP notification; add it to the loop's pending-request queue directly
    handle->flags |= UV_HANDLE_READ_PENDING;
    req->u.io.overlapped.InternalHigh = bytes;
    handle->reqs_pending++;
    uv_insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    //the request will complete through IOCP
    handle->flags |= UV_HANDLE_READ_PENDING;//mark a read as pending
    handle->reqs_pending++;//one more pending request
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&//IOCP is emulated
        req->wait_handle == INVALID_HANDLE_VALUE &&
        !RegisterWaitForSingleObject(&req->wait_handle,//register a wait on the event
          req->event_handle, post_completion, (void*) req,
          INFINITE, WT_EXECUTEINWAITTHREAD)) {
      SET_REQ_ERROR(req, GetLastError());
      uv_insert_pending_req(loop, (uv_req_t*)req);//add the read request to the loop's pending-request queue
    }
  } else {
    //record the error
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
  }
}

The loop's handling of a read request ultimately ends up in the following function:

void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_req_t* req) {
  DWORD bytes, flags, err;
  uv_buf_t buf;
  assert(handle->type == UV_TCP);
  handle->flags &= ~UV_HANDLE_READ_PENDING;//clear the read-pending flag
  if (!REQ_SUCCESS(req)) {
    //the read failed
    if ((handle->flags & UV_HANDLE_READING) ||
        !(handle->flags & UV_HANDLE_ZERO_READ)) 
    {
      handle->flags &= ~UV_HANDLE_READING;
      DECREASE_ACTIVE_COUNT(loop, handle);//decrement the active count
      buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
            uv_buf_init(NULL, 0) : handle->tcp.conn.read_buffer;
      err = GET_REQ_SOCK_ERROR(req);
      if (err == WSAECONNABORTED) {
        err = WSAECONNRESET;
      }
      //invoke the read callback with the error
      handle->read_cb((uv_stream_t*)handle,
                      uv_translate_sys_error(err),
                      &buf);
    }
  } else {//success
    if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
      if (req->u.io.overlapped.InternalHigh > 0) {
        //data was read
        handle->read_cb((uv_stream_t*)handle,
                        req->u.io.overlapped.InternalHigh,
                        &handle->tcp.conn.read_buffer);
        //if fewer bytes than the buffer size arrived, there is no more data for now; otherwise fall through and keep reading synchronously
        if (req->u.io.overlapped.InternalHigh < handle->tcp.conn.read_buffer.len) {
          goto done;
        }
      } else {
        //zero bytes read: the peer closed the connection (EOF)
        if (handle->flags & UV_HANDLE_READING) {
          handle->flags &= ~UV_HANDLE_READING;
          DECREASE_ACTIVE_COUNT(loop, handle);
        }
        handle->flags &= ~UV_HANDLE_READABLE;
        buf.base = 0;
        buf.len = 0;
        handle->read_cb((uv_stream_t*)handle, UV_EOF, &handle->tcp.conn.read_buffer);
        goto done;
      }
    }
    //keep reading synchronously while data is available
    while (handle->flags & UV_HANDLE_READING) {
      handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);//ask the user for a buffer via the alloc callback
      if (buf.len == 0) {
        handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
        break;
      }
      assert(buf.base != NULL);
      flags = 0;
      if (WSARecv(handle->socket,
                  (WSABUF*)&buf,
                  1,
                  &bytes,
                  &flags,
                  NULL,
                  NULL) != SOCKET_ERROR) {
        if (bytes > 0) {
          //data read successfully; invoke the read callback
          handle->read_cb((uv_stream_t*)handle, bytes, &buf);
          //if the buffer was not filled completely, there is no more data for now
          if (bytes < buf.len) {
            break;
          }
        } else {
          //the connection was closed by the peer
          handle->flags &= ~(UV_HANDLE_READING | UV_HANDLE_READABLE);
          DECREASE_ACTIVE_COUNT(loop, handle);
          handle->read_cb((uv_stream_t*)handle, UV_EOF, &buf);
          break;
        }
      } else {//the read failed
        err = WSAGetLastError();
        if (err == WSAEWOULDBLOCK) {
          //no data available in the socket buffer right now
          handle->read_cb((uv_stream_t*)handle, 0, &buf);
        } else {
          //a serious error occurred
          handle->flags &= ~UV_HANDLE_READING;
          DECREASE_ACTIVE_COUNT(loop, handle);
          if (err == WSAECONNABORTED) {
            err = WSAECONNRESET;
          }
          handle->read_cb((uv_stream_t*)handle,
                          uv_translate_sys_error(err),
                          &buf);
        }
        break;
      }
    }
done:
    //after one read request finishes, queue another one as long as reading has not been stopped
    if ((handle->flags & UV_HANDLE_READING) &&
        !(handle->flags & UV_HANDLE_READ_PENDING)) {
      uv_tcp_queue_read(loop, handle);
    }
  }
  DECREASE_PENDING_REQ_COUNT(handle);//one fewer pending request on the handle
}

Stopping reads. Exported function, declared in uv.h and defined in stream.c.

Reading does not stop immediately: after this function is called, it affects the handling of the most recent read request, and no further read requests are issued, which is how reading eventually comes to a stop.

int uv_read_stop(uv_stream_t* handle) {
  int err;
  if (!(handle->flags & UV_HANDLE_READING))//not reading, nothing to do
    return 0;
  err = 0;
  if (handle->type == UV_TTY) {
    err = uv_tty_read_stop((uv_tty_t*) handle);
  } else {
    if (handle->type == UV_NAMED_PIPE) {
      uv__pipe_stop_read((uv_pipe_t*) handle);
    } else {
      handle->flags &= ~UV_HANDLE_READING;//TCP: just clear the reading flag
    }
    DECREASE_ACTIVE_COUNT(handle->loop, handle);//decrement the handle's active count
  }
  return uv_translate_sys_error(err);
}
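
A common pattern is to call uv_read_stop from inside the read callback once enough data has arrived; reading can later be resumed with another uv_read_start. A small sketch (my own example; total_bytes and the 1 MB limit are invented for illustration):

#include <stdlib.h>
#include <uv.h>

static size_t total_bytes = 0;

static void on_read_limited(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
  if (nread > 0) {
    total_bytes += (size_t) nread;
    if (total_bytes >= 1024 * 1024) {
      //no further read request will be queued once the current one is handled;
      //the handle's active count drops and the stream may become inactive
      uv_read_stop(stream);
    }
  }
  if (buf->base)
    free(buf->base);
}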

From uv_tcp_t's connect and read paths we can observe the following:
1. The active state of a uv_tcp_t is driven mainly by requests: once a connect request has been processed, if no further request (such as a read) is issued, the uv_tcp_t returns to the stopped state and the loop no longer counts it among its active handles.
2. Some stream-related requests are recorded in the loop's request list, connect and write among them. My guess is that requests issued by the user are added to the loop's request queue, while requests libuv issues on its own are not (for example uv_read_start takes no req parameter and uses the stream's private uv_read_t instead).
3. Some stream-related requests re-issue themselves after being processed, read being one example, and presumably accept works the same way; the user does not need to re-issue them, but does need to stop them explicitly.

Closing a uv_tcp_t through uv_close ends up calling uv_tcp_close:

void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
  int close_socket = 1;
  if (tcp->flags & UV_HANDLE_READ_PENDING) {
    //a read request is still pending: an asynchronous WSARecv has been issued but has not completed yet
    if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET))
    {
      //not a shared socket, so just shut it down. UV_HANDLE_SHARED_TCP_SOCKET is set
      //when the socket is shared with another process
      shutdown(tcp->socket, SD_SEND);//disable further sends on the socket
    } else if (uv_tcp_try_cancel_io(tcp) == 0)
    {//cancellation via the CancelIo API succeeded
      //for a shared socket, try to cancel the pending I/O; on success the socket is not
      //closed right away but only when the read request comes back
      close_socket = 0;
    } else
    {//shared socket and cancellation failed
      //closing the socket is the only option left
    }
  } else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
             tcp->tcp.serv.accept_reqs != NULL) {
    /* Under normal circumstances closesocket() will ensure that all pending */
    /* accept reqs are canceled. However, when the socket is shared the */
    /* presence of another reference to the socket in another process will */
    /* keep the accept reqs going, so we have to ensure that these are */
    /* canceled. */
    if (uv_tcp_try_cancel_io(tcp) != 0) {
      /* When cancellation is not possible, there is another option: we can */
      /* close the incoming sockets, which will also cancel the accept */
      /* operations. However this is not cool because we might inadvertently */
      /* close a socket that just accepted a new connection, which will */
      /* cause the connection to be aborted. */
      unsigned int i;
      for (i = 0; i < uv_simultaneous_server_accepts; i++) {
        uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
        if (req->accept_socket != INVALID_SOCKET &&
            !HasOverlappedIoCompleted(&req->u.io.overlapped)) {
          closesocket(req->accept_socket);
          req->accept_socket = INVALID_SOCKET;
        }
      }
    }
  }

  if (tcp->flags & UV_HANDLE_READING) {
    tcp->flags &= ~UV_HANDLE_READING;//stop reading
    DECREASE_ACTIVE_COUNT(loop, tcp);
  }

  if (tcp->flags & UV_HANDLE_LISTENING) {
    tcp->flags &= ~UV_HANDLE_LISTENING;
    DECREASE_ACTIVE_COUNT(loop, tcp);
  }

  if (close_socket) {
    closesocket(tcp->socket);
    tcp->socket = INVALID_SOCKET;
    tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
  }

  tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
  uv__handle_closing(tcp);//the handle's state becomes closing
  //if no requests are pending, close the handle now; otherwise it is closed when the remaining requests finish
  if (tcp->reqs_pending == 0) {
    uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
  }
}
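
From the user's side the close is asynchronous: uv_close (and therefore uv_tcp_close) only marks the handle as closing, and the close callback fires from uv_run once reqs_pending has dropped to zero and the endgame runs. A small sketch (my own example; on_closed and shutdown_client are illustrative names):

#include <stdio.h>
#include <uv.h>

static void on_closed(uv_handle_t* handle) {
  printf("tcp handle fully closed\n");
}

static void shutdown_client(uv_tcp_t* client) {
  if (!uv_is_closing((uv_handle_t*) client))
    uv_close((uv_handle_t*) client, on_closed);
}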

uv_tcp_t carries two counters:
1. reqs_pending counts requests that have been added to the loop's pending queue, or whose I/O call has been issued on a socket bound to the IOCP port so that the completion can be retrieved from it. It is decremented when the request has been processed; when it reaches 0 while the handle is in the closing state, the handle is added to the endgame (close) list.
2. activecnt is incremented when a request is issued and decremented in the corresponding processing function; when it reaches 0 (and the handle is not already closing) the handle is stopped.
Issuing and processing requests is what drives both counters.

Some requests, such as connect and write, are also added to the loop's active_reqs list and removed from it once the request has been processed. This list is one of the conditions used to decide whether the loop is still alive.

TCP also involves the accept and write operations, which will be analyzed in the next post.
