C++ Socket Communication Models (3): IOApc

Before getting to IOCP, it is worth covering another design built on the Overlapped I/O model. The previous article was based on event notification; this one covers completion routines, which you can also think of as callbacks. With event notification, each thread can wait on at most 64 events (WSA_MAXIMUM_WAIT_EVENTS). Even with the multi-threading optimization from the previous article, the thread count is still bounded: on my 8 GB machine I can create roughly 1,500 threads, and 1500 * 64 = 96,000. That may look like enough, but leaving aside the heavy context switching, it is still far below the maximum number of sockets a single machine can hold. When I build servers I keep the thread count to no more than roughly twice the machine's hardware thread count, so spawning that many threads is not an option. That leads to another approach: the APC callback queue that Microsoft once promoted heavily.

First, look at the WSARecv prototype as documented in the encyclopedia entry. If you read the previous article, you may have noticed that I passed NULL as its last parameter:

int WSARecv(
	SOCKET s,                                               // the socket the operation is posted on
	LPWSABUF lpBuffers,                                     // unlike recv, this takes an array of WSABUF structures
	DWORD dwBufferCount,                                    // number of WSABUF structures in the array
	LPDWORD lpNumberOfBytesRecvd,                           // if the receive completes immediately, receives the byte count
	LPDWORD lpFlags,                                        // in/out flags for the call
	LPWSAOVERLAPPED lpOverlapped,                           // the overlapped structure for this operation
	LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine  // the completion routine; last time we set this to NULL
);
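
For reference, whatever you bind through that last parameter has to match the WSAOVERLAPPED_COMPLETION_ROUTINE signature documented for WSARecv (CompletionRoutine below is just a placeholder name; the Do callback in the server code further down follows exactly this shape):

void CALLBACK CompletionRoutine(
	DWORD dwError,                // 0 on success, otherwise a WSA error code
	DWORD cbTransferred,          // bytes transferred; 0 means the peer closed the connection
	LPWSAOVERLAPPED lpOverlapped, // the WSAOVERLAPPED that was passed to WSARecv
	DWORD dwFlags                 // flags for the completed operation
);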

Binding a completion routine through that last parameter, instead of using event notification, achieves the same effect as the IOEvent model, and maintenance becomes simpler: there is no event array to keep, so stored clients no longer need to stay aligned with a list of events. We can simply generate an ID for each client and keep them in a map, which is much closer to real-world practice. That said, APC has its drawbacks:

1. If a worker thread is busy, the callbacks sitting in its APC queue cannot run. The thread must wait alertably with SleepEx; it must not block, and a plain Sleep will not do (see the sketch after this list).
2. An APC callback always runs on the thread that issued the I/O; it cannot be handed off to another thread, so the workers must be separated from the accept logic from the very start.
3. Since SleepEx really does sleep, there is some latency right after startup, but once the server is warm this has almost no impact.
4. When an APC callback wakes SleepEx, the statements that follow it in the loop also execute, so there are redundant re-checks (in my code: _acceptLock[I].lock(); if (_acceptDeque[I].size() == 0)).
5. Load balancing becomes a hard problem: the threads run nothing but callbacks, and each callback's execution time is unknown, so some threads can end up very busy while others sit idle.
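
To make drawback 1 concrete, here is a minimal, self-contained sketch (not part of this article's server; MyApc and Worker are illustrative names) showing that a queued APC only runs while its target thread is in an alertable wait. Swap the SleepEx below for a plain Sleep and MyApc never executes:

#include <windows.h>
#include <process.h>
#include <iostream>

void CALLBACK MyApc(ULONG_PTR param)
{
	std::cout << "APC ran, param=" << param << std::endl;
}

unsigned int WINAPI Worker(LPVOID)
{
	while (true)
		SleepEx(1000, TRUE); // alertable wait: queued APCs run on THIS thread, then SleepEx returns WAIT_IO_COMPLETION
}

int main()
{
	HANDLE h = (HANDLE)_beginthreadex(0, 0, Worker, 0, 0, 0);
	QueueUserAPC(MyApc, h, 42); // enqueue MyApc onto the worker's APC queue
	Sleep(100);                 // give the worker a moment to drain its queue
	return 0;
}

This is the same reason Proc in the server below ends each pass with SleepEx(1000, true): that call is the only point where the completion routines queued to that worker actually get to run.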


Now for the code. When you are not debugging, turn off the console output, otherwise a single Do call runs far too slowly; the callbacks belonging to one thread execute sequentially on it, which is exactly how the APC queue behaves.

server:

// IOApc.cpp : Defines the entry point for the console application.
//

#include "stdafx.h"
#include <iostream>
#include<WinSock2.h>
#include<process.h>
#include<mutex>
#include <deque>
#include <map>
#pragma comment(lib,"ws2_32.lib")
unsigned int WINAPI CreateServ(LPVOID args);
unsigned int WINAPI Proc(LPVOID args);
void CALLBACK Do(IN DWORD dwError,
	IN DWORD cbTransferred,
	IN LPWSAOVERLAPPED lpOverlapped,
	IN DWORD dwFlags);
using namespace std;
const int _thread_count = 8;
const int _bufLen = 1024;

struct Client
{
	WSAOVERLAPPED overlapped;
	SOCKET s;
	WSABUF buf;
	int procId;
	int id;
};

DWORD dwRecvCount = 0; // byte count for immediately-completing calls; real results arrive via the completion routine (MSDN suggests passing NULL here when an overlapped struct is supplied)
DWORD nFlag = 0;       // in/out flags for WSARecv
deque<Client*> _acceptDeque[_thread_count];
mutex _acceptLock[_thread_count];
map<int, Client*> _clients;
mutex m;
int main()
{
	_beginthreadex(0, 0, CreateServ, 0, 0, 0);
	for (int i = 0; i < _thread_count; i++) {
		int* temp = new int(i);
		_beginthreadex(0, 0, Proc, temp, 0, 0);
	}
	cin.get();
	cin.get();
	return 0;
}
void release(Client* c)
{
	m.lock();
	_clients.erase(c->id);
	m.unlock();

	cout << "release" << endl;
	closesocket(c->s); // close the socket
	delete[] c->buf.buf;
	delete c;
}
unsigned int WINAPI Proc(LPVOID args)
{
	int I = *(int*)args;
	while (true)
	{
		while (true)
		{
			_acceptLock[I].lock();
			if(_acceptDeque[I].size()==0)
			{
				_acceptLock[I].unlock();
				break;
			}
			Client* c= _acceptDeque[I].front();
			_acceptDeque[I].pop_front();
			_acceptLock[I].unlock();

			c->procId = I;

			m.lock();
			int i;
			// note: rand() only returns 0..RAND_MAX (32767 on MSVC), so the pool of distinct IDs is small
			do{
				i = rand() % MAXINT32;
			} while (_clients.find(i) != _clients.end());
			c->id = i;
			_clients.insert(pair<int, Client*>(i, c));
			m.unlock();
			// post an overlapped receive; Do will be queued as an APC to this worker thread
			if(WSARecv(c->s, &c->buf, 1, &dwRecvCount, &nFlag, &c->overlapped, Do)== SOCKET_ERROR)
			{
				int err = WSAGetLastError();
				if(err!=WSA_IO_PENDING)
				{
					release(c);
				}
			}
		}
		SleepEx(1000, true); // alertable wait: this is where the queued completion routines actually run
	}
}

void CALLBACK Do(IN DWORD dwError,
	IN DWORD cbTransferred,
	IN LPWSAOVERLAPPED lpOverlapped,
	IN DWORD dwFlags)
{
	Client* c = (Client*)lpOverlapped; // valid because overlapped is the first member of Client
	if (dwError != 0 || cbTransferred == 0) // an error occurred or the peer disconnected
	{
		release(c);
		return;
	}
	cout << "proc by:" << c->procId << endl;
	//cout << c->buf.buf << endl;
	memset(c->buf.buf, 0, _bufLen);
	char buf[128];
	sprintf_s(buf, "hello client");
	send(c->s, buf, 128, 0);
	if (WSARecv(c->s, &c->buf, 1, &dwRecvCount, &nFlag, &c->overlapped, Do) == SOCKET_ERROR) // re-arm the receive
	{
		int err = WSAGetLastError();
		if (err != WSA_IO_PENDING)
		{
			release(c);
		}
	}
}
unsigned int WINAPI CreateServ(LPVOID args) {
	srand((unsigned int)time(0)); // seed the client-ID generator
	WORD wVersion;
	WSADATA wsaData;
	int err;
	wVersion = MAKEWORD(2, 1);
	err = WSAStartup(wVersion, &wsaData);
	if (err != 0) {
		return 0;
	}
	if (LOBYTE(wsaData.wVersion) != 2 || HIBYTE(wsaData.wVersion) != 1) {
		WSACleanup();
		return 0;
	}
	SOCKET sockSrv = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, 0, 0, WSA_FLAG_OVERLAPPED); // WSA_FLAG_OVERLAPPED is required for overlapped I/O
	const char chOpt = 1;
	setsockopt(sockSrv, IPPROTO_TCP, TCP_NODELAY, &chOpt, sizeof(chOpt));

	int nSendBufLen = 16 * 1024 * 1024;
	setsockopt(sockSrv, SOL_SOCKET, SO_SNDBUF, (const char*)&nSendBufLen, sizeof(int));

	SOCKADDR_IN addrSrv;
	addrSrv.sin_addr.S_un.S_addr = htonl(ADDR_ANY);
	addrSrv.sin_family = AF_INET;
	addrSrv.sin_port = htons(6001);

	::bind(sockSrv, (SOCKADDR*)&addrSrv, sizeof(SOCKADDR));

	err = listen(sockSrv, SOMAXCONN);
	if (err == SOCKET_ERROR) {
		cout << "listen failed" << endl;
		WSACleanup();
		return 0;
	}

	SOCKADDR_IN remoteAddr;
	int addrSize = sizeof(remoteAddr);
	//accept loop

	while (true) {
		SOCKET s = accept(sockSrv, (SOCKADDR*)&remoteAddr, &addrSize);
		if (s == INVALID_SOCKET) continue; // accept failed, try again
		Client* c = new Client;
		c->s = s;
		char*  buf = new char[_bufLen];
		memset(buf, 0, _bufLen);
		c->buf.buf = buf;
		c->buf.len = _bufLen;
		// pick the worker with the shortest queue (sizes are read without taking
		// the locks here, so the choice is only approximate)
		int min = 0;
		for (int i = 1; i < _thread_count; i++)
		{
			if (_acceptDeque[i].size() < _acceptDeque[min].size())
			{
				min = i;
			}
		}
		_acceptLock[min].lock();
		_acceptDeque[min].push_back(c);
		_acceptLock[min].unlock();
	}
	return 0;
}


client:

Same as in C++ Socket Communication Models (1), so it is not pasted again here.
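
For readers who don't have part (1) at hand, a minimal blocking client along the following lines is enough to exercise this server (a sketch under my own assumptions, not necessarily identical to the client from part (1)):

#include <iostream>
#include <cstring>
#include <WinSock2.h>
#pragma comment(lib,"ws2_32.lib")
using namespace std;

int main()
{
	WSADATA wsaData;
	if (WSAStartup(MAKEWORD(2, 1), &wsaData) != 0) return 0;

	SOCKET s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	SOCKADDR_IN addr;
	addr.sin_family = AF_INET;
	addr.sin_port = htons(6001);                         // the port the server listens on
	addr.sin_addr.S_un.S_addr = htonl(INADDR_LOOPBACK);  // 127.0.0.1
	if (connect(s, (SOCKADDR*)&addr, sizeof(addr)) == SOCKET_ERROR) return 0;

	char buf[128];
	while (true)
	{
		send(s, "hello server", 13, 0);                  // 12 chars plus the terminating '\0'
		memset(buf, 0, sizeof(buf));
		if (recv(s, buf, sizeof(buf), 0) <= 0) break;    // 0 or SOCKET_ERROR: server is gone
		cout << buf << endl;                             // expect "hello client"
		Sleep(1000);
	}
	closesocket(s);
	WSACleanup();
	return 0;
}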
