一. IO密集測試的概念
說白了就是大量的client對server進行連接或斷開,同時也只進行連接和斷開,並不做大量的數據處理。
IO密集測試就是檢測server程序應對大量IO請求時的處理能力。也是C10k成功完成的標誌之一。
二. 我的IO壓力測試方案
測試需求:
1.參考聊天服務器應對客戶端時的情形:客戶的鏈接和斷開完全是隨機發生的。
2.可以連接10000個客戶端程序。
3.保證每一個客戶端與服務器是連通的,及服務器可以接受到客戶端發來的數據。
4.服務可以操作客戶端發來的數據,同時發送出去。
所以針對以上需求分別提出以下解決方法:
1.測試客戶端實現隨機的斷開和鏈接服務器
生成1-100的隨機數x,通過判斷x所在區間,生成概率事件。連接不同的概率事件和客戶端操作以實現client隨機的斷開與鏈接server。
例:系統隨機生成一個數x,x落在區間[0-50)讓客戶端執行連接服務器操作,如果落在[50-100)讓客戶端執行斷開服務器操作。
2.測試client可以隨機斷開和鏈接server的同時可以達到10000的鏈接數
方案一:直接socket10000次,先鏈接10000個client,然後對其隨機的斷開重連。
方案二:動態的鏈接和斷開client和server之間的鏈接,讓其緩慢增加最後的鏈接的總數穩定在10000左右。
參考聊天服務器的工作狀態,我選擇了方案二,以下描述方案二的實現。
基本實現思路基於1.測試客戶端實現隨機的斷開和鏈接服務器的概率事件,這裏根據已建立的鏈接數動態地改變鏈接和斷開的概率,如圖:
縱軸爲概率值,橫軸爲已建立的鏈接總數。
在實現時爲了減少計算量我定義了閾值和對應的概率,沒有使用線性關係表達式通過連接數來計算概率。
舉例程序中處理流程(連接數在區間(1000,1999]時的處理過程,連接概率爲0.95,斷開概率爲0.05)如圖:
在代碼實現時,我以1000爲單位對10000分了10個區間,分別對應不同的概率來處理對應的事件。
3.測試client創建的所有連接都是可用的
我的測試方法是,向所有socket描述符寫數據,檢查服務器是否可以收到這些數據。
根據以上方案提出以下需求:
1.動態的保存所有已連接的socket描述符,並拋棄斷開鏈接的socket描述符。
2.向所有的socket描述符發送數據。
對這兩個問題的解決:
1).動態的保存或丟棄socket描述符
這裏我使用了c++的vector存放socket建立的描述符,因爲它比數組好用,具體理由可以查看手冊。
在socket創建鏈接後,把生成的鏈接描述符,添加到vector隊尾;斷開連接時把vector隊頭的描述符銷燬釋放。
結構如圖:
2).向所有socket描述符發送數據
基於1.動態的保存或丟棄socket描述符的實現,發送數據只要把vector隊列所有的socket描述符遍歷一遍,並向其發送數據。
需要注意的是,讀到最後一個時的處理;以及遍歷Vector的速度一定要大於鏈接斷開和鏈接的速度(因爲隊列是動態的,如果斷開和鏈接的速度大於或等於遍歷的速度,可能會造成對描述符的漏讀)。
結構如圖:
4.server得到數據後的處理
爲了體現服務器的性能而不是客戶端的性能,我在這裏做了簡單傻瓜化的處理,將服務器接收到的數據統一發送到指定的客戶端,這個特殊的客戶端只負責接收服務器發來的數據。
它與服務器和測試客戶端的關係如圖:
以上就服務器測試的一個基本方案以及問題的解決思路。
三. 測試客戶端實現
1.c10k IO client
主函數文件:
#include "simulation.h"
typedef std::vector<int> ReadList;
extern ReadList readlist;
/*
 * Writer thread: endlessly sweeps the shared readlist and sends a short
 * greeting on every connected socket, wrapping back to the front once
 * the tail is reached.
 *
 * NOTE(review): readlist is mutated concurrently by the main thread
 * (connect/disconnect events) with no lock; size()/operator[] here can
 * race with push_back/erase and hand back a stale or closed descriptor.
 * Confirm whether a mutex should guard readlist.
 */
void *writeData_pthread(void *arg)
{
    (void)arg;
    size_t idx = 0;        /* size_t: avoids the signed/unsigned compare */
    char buf[50];

    while (1)
    {
        if (idx < readlist.size())
        {
            int sockfd = readlist[idx];
            idx++;
            /* Cast size() to int: passing a size_t through "%d" is
             * undefined behavior on 64-bit targets.  snprintf bounds
             * the message so buf can never be overrun. */
            snprintf(buf, sizeof(buf),
                     "Hallo Sever I am Client NO. %d , szie: %d",
                     sockfd, (int)readlist.size());
            if (write(sockfd, buf, sizeof(buf)) < 0)
            {
                std::cout << "\033[32m" << "error : sockfd:" << sockfd << "\033[0m" << std::endl;
            }
            usleep(1000);  /* throttle to roughly 1000 writes per second */
        }
        else
        {
            idx = 0;       /* past the tail: restart the sweep */
        }
    }
    return NULL;           /* unreachable; silences missing-return warnings */
}
/*
 * Driver loop: spawn the writer thread, then every 10 ms roll one
 * connect/disconnect probability event based on how many connections
 * currently exist, printing a progress line every 100 operations.
 */
int main()
{
    pthread_t writerTid;
    pthread_create(&writerTid, NULL, writeData_pthread, NULL);

    for (int op = 0; ; )
    {
        int connectNum = readlist.size();
        probabilityEvent(op++, randomRank(connectNum));
        if (op % 100 == 0)
        {
            std::cout << "\033[31m" << "connects number:" << connectNum << " operation number:" << op << "\033[0m" << std::endl;
        }
        usleep(10000);
    }

    pthread_join(writerTid, NULL);  /* never reached: the loop above is endless */
    return 0;
}
與概率事件執行操作的文件:
頭文件:
#ifndef SIMULATION_H
#define SIMULATION_H
// Random connect/disconnect event simulation for the C10k IO stress
// test: the connect probability shrinks (and the disconnect probability
// grows) as the number of established connections rises.
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <vector>
#include <string.h>
#include <pthread.h>
//#define RAND_MAX 32767
// Connection-count bands: RANK_CONNECT_n covers roughly the n-th
// thousand of established connections; EQUILIBRIUM_VALUE marks the
// 10000-connection target, where connect/close become 50/50.
enum RANK_CONNECT
{
RANK_CONNECT_1 = 1, RANK_CONNECT_2 = 2, RANK_CONNECT_3 = 3, RANK_CONNECT_4 = 4, RANK_CONNECT_5 = 5,
RANK_CONNECT_6 = 6, RANK_CONNECT_7 = 7, RANK_CONNECT_8 = 8, RANK_CONNECT_9 = 9, RANK_CONNECT_10 =10,
EQUILIBRIUM_VALUE = 11
};
// Probability thresholds expressed as integer percentages out of 100.
enum RANK_PROB
{
/*close: thresholds used for the disconnect event*/
RANK_PROB_0 = 0,RANK_PROB_0_05 = 5,RANK_PROB_0_1 = 10,RANK_PROB_0_15 = 15,
RANK_PROB_0_2 = 20,RANK_PROB_0_25 = 25,RANK_PROB_0_3 = 30,RANK_PROB_0_35 = 35,
RANK_PROB_0_4 = 40,RANK_PROB_0_45 = 45,RANK_PROB_0_5 = 50,
/*connect: thresholds used for the connect event*/
RANK_PROB_0_55 = 55,RANK_PROB_0_6 = 60,RANK_PROB_0_65 = 65,RANK_PROB_0_7 = 70,
RANK_PROB_0_75 = 75,RANK_PROB_0_8 = 80,RANK_PROB_0_85 = 85,RANK_PROB_0_9 = 90,
RANK_PROB_0_95 = 95,RANK_PROB_1 = 100
};
int randomData(int min, int max);                                  // random int in [min, max)
int randomRank(long sockfdNum);                                    // connection count -> RANK_CONNECT band
int probabilitySelect(int i, int probabConnect, int probabClose);  // roll one connect/close event
int probabilityEvent(int i,int rankConnet);                        // dispatch event for a band
#endif // SIMULATION_H
程序文件:
#include "simulation.h"
#include "myclient.h"
typedef std::vector<int> ReadList;
ReadList readlist;
/*
 * Return a pseudo-random integer drawn from the half-open range
 * [min, max).  (Carries the usual slight modulo bias when the span
 * does not evenly divide RAND_MAX + 1.)
 */
int randomData(int min, int max)
{
    const int span = max - min;
    return min + rand() % span;
}
/*
 * Map the current number of established connections to its probability
 * band: the n-th thousand (0-999 -> RANK_CONNECT_1, 1000-1999 ->
 * RANK_CONNECT_2, ...) and EQUILIBRIUM_VALUE at or above 10000.
 *
 * Fixes the original threshold ladder, which tested "< 6000" twice and
 * skipped the 4000 boundary: 3000-4999 collapsed into one band and
 * RANK_CONNECT_6 (the 75/25 split) was unreachable.
 */
int randomRank(long sockfdNum)
{
    if (sockfdNum >= 10000)
    {
        return EQUILIBRIUM_VALUE;
    }
    if (sockfdNum < 0)
    {
        sockfdNum = 0; /* defensive: a negative connection count makes no sense */
    }
    /* RANK_CONNECT_1..RANK_CONNECT_10 are the consecutive values 1..10,
     * one per completed thousand of connections. */
    return RANK_CONNECT_1 + (int)(sockfdNum / 1000);
}
/*
 * Roll one probability event: with probabConnect% likelihood, open a
 * new connection to the server at 127.0.0.1:8000 and append its
 * descriptor to readlist; otherwise close and discard the oldest
 * descriptor (the front of readlist).
 *
 * Returns 0 on success, -1 if the chosen action could not be carried
 * out.  (The original was declared int but returned nothing at all —
 * undefined behavior in C++ — and called front() on a possibly-empty
 * vector, which is also UB.)
 */
int probabilitySelect(int i, int probabConnect, int probabClose)
{
    (void)i;           /* kept for interface compatibility; unused */
    (void)probabClose; /* the close branch is simply the complement */
    int number = randomData(0, 100);

    if (number < probabConnect)
    {
        /* connect a new client */
        int sockfd = socket(AF_INET, SOCK_STREAM, 0);
        if (sockfd < 0)
        {
            perror("socket");
            return -1;
        }
        struct sockaddr_in serveraddr;
        bzero(&serveraddr, sizeof(serveraddr));
        serveraddr.sin_family = AF_INET;
        inet_pton(AF_INET, "127.0.0.1", &serveraddr.sin_addr);
        serveraddr.sin_port = htons(8000);
        if (connect(sockfd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)) < 0)
        {
            perror("connect");
            close(sockfd); /* don't leak the descriptor on failure */
            return -1;
        }
        readlist.push_back(sockfd);
    }
    else
    {
        /* disconnect the oldest client, if there is one */
        if (readlist.empty())
        {
            return -1; /* nothing to close; front() would be UB */
        }
        int sockfd = readlist.front();
        close(sockfd);
        readlist.erase(readlist.begin());
    }
    return 0;
}
/*
 * Dispatch one probability event for the given connection band.
 * Each band above the first moves 5 percentage points from "connect"
 * to "close": band 1 -> 100/0, band 2 -> 95/5, ... band 10 -> 55/45,
 * and EQUILIBRIUM_VALUE (11) -> 50/50 — exactly the table the original
 * switch spelled out case by case.
 *
 * Returns probabilitySelect's result, or -1 for an out-of-range band.
 * (The original switch fell off the end of an int function without a
 * return statement — undefined behavior.)
 */
int probabilityEvent(int i, int rankConnet)
{
    if (rankConnet < RANK_CONNECT_1 || rankConnet > EQUILIBRIUM_VALUE)
    {
        return -1; /* unknown band: do nothing (matches the old default case) */
    }
    /* 5 points shift per band; RANK_PROB_0_05 == 5, RANK_PROB_1 == 100. */
    int probabClose = (rankConnet - RANK_CONNECT_1) * RANK_PROB_0_05;
    int probabConnect = RANK_PROB_1 - probabClose;
    return probabilitySelect(i, probabConnect, probabClose);
}
其他文件,與此測試沒有實際聯繫:
#ifndef MYCLIENT_H
#define MYCLIENT_H
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <string.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <time.h>
#define RANDOM 0
#define DELAYED 1
/*
 * Read one '\n'-terminated line from stdin into buf, storing at most
 * len bytes.  buf is zero-filled first, so shorter input stays
 * NUL-terminated (input of exactly len bytes is not).
 *
 * Fixes: getchar() must be kept in an int so EOF (-1) is
 * distinguishable from a valid byte; the original's char could never
 * compare equal to EOF and looped forever at end of input.
 */
void inputString(char *buf, size_t len)
{
    int c;
    size_t i = 0;
    bzero(buf, len);
    while ((c = getchar()) != '\n' && c != EOF)
    {
        buf[i++] = (char)c;
        if (i >= len)
        {
            break; /* buffer full: stop before overflowing */
        }
    }
}
/*
 * Format the current local time into buf as "<YYYY-MM-DD hh:mm:ss>".
 * The caller's buffer must hold at least 22 bytes for 4-digit years.
 */
void set_time(char *buf)
{
    const time_t now = time(NULL);
    const tm *lt = localtime(&now);
    sprintf(buf, "<%d-%02d-%02d %02d:%02d:%02d>",
            lt->tm_year + 1900, lt->tm_mon + 1, lt->tm_mday,
            lt->tm_hour, lt->tm_min, lt->tm_sec);
}
/* Compose the wire message "TIME:<timestamp>\t<payload>" into wbuf. */
void makebuf(char *timebuf, char *buf, char *wbuf)
{
    strcpy(wbuf, "TIME:");
    strcat(wbuf, timebuf);
    strcat(wbuf, "\t");
    strcat(wbuf, buf);
}
/*
 * Transmit wbuf on sockfd in one of two styles:
 *  - mode == RANDOM:  send only when (current second % 10) exceeds a
 *    fresh random value in 0..10 (roughly a coin flip), then sleep 1s;
 *  - mode == DELAYED: send unconditionally, then sleep sec seconds
 *    (negative sec is clamped to 0);
 *  - any other mode: jumps back into the RANDOM branch via goto.
 *
 * NOTE(review): write()'s return value is ignored in both branches.
 * The backward goto into the if-branch is legal (the label precedes
 * the declarations it skips over) but fragile — confirm the
 * unknown-mode fallback to RANDOM is intentional.
 */
void random_delayed_tx(int mode, int sec,int sockfd, char* wbuf , int wbuflen)
{
if (mode == RANDOM)
{
random_mode:
int x = rand()%11;
time_t tt = time(NULL);
tm* t = localtime(&tt);
if ((t->tm_sec)%10 > x)
{
std::cout <<"SET IS :" << wbuf << std::endl;
write(sockfd, wbuf , wbuflen);
}
sleep(1);
}else if (mode == DELAYED)
{
if (sec <= 0)
{
sec = 0; /* clamp: never pass a negative duration to sleep() */
}
std::cout <<"SET IS :" << wbuf << std::endl;
write(sockfd, wbuf , wbuflen);
sleep(sec);
}else{
goto random_mode; /* unknown mode: fall back to RANDOM behavior */
}
}
#endif //MYCLIENT_H
2.read client
其中頭文件與上面的頭文件一樣,如果讀到數據就打印:
#include "myclient.h"
/*
 * "Read client": connects to the server at 127.0.0.1:8000, announces
 * itself, then loops forever printing everything the server relays to
 * it (the server funnels all test traffic to this one client).
 *
 * Fixes: the original printed buf without NUL-terminating it, so
 * operator<< read past the received bytes; socket()/connect() results
 * were never checked.
 */
int main()
{
    int sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0)
    {
        perror("socket");
        return 1;
    }
    struct sockaddr_in serveraddr;
    bzero(&serveraddr, sizeof(serveraddr));
    serveraddr.sin_family = AF_INET;
    inet_pton(AF_INET, "127.0.0.1", &serveraddr.sin_addr);
    serveraddr.sin_port = htons(8000);
    if (connect(sockfd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)) < 0)
    {
        perror("connect");
        close(sockfd);
        return 1;
    }
    write(sockfd, "hallo", 5);

    int n;
    char buf[100];
    while (1)
    {
        /* read one byte less than the buffer so the terminator fits */
        if ((n = read(sockfd, buf, sizeof(buf) - 1)) > 0)
        {
            buf[n] = '\0';
            std::cout << "\033[34mFrom systeam:" << buf << "\033[0m"<< std::endl;
        }
        usleep(100);
    }
    close(sockfd); /* unreachable, kept for symmetry */
    return 0;
}
四. 測試時服務器出現的問題與調整
前面的文章已經貼出了代碼,看過的人知道我只用一把鎖,但當時我並沒有注意到這個問題,我把虛擬機從單核改爲多核時服務器只能接收部分來自客戶端的數據。
還有我並沒有判斷pthread_cond_wait條件,直接使用導致程序不穩定。
以及爲了配合兩個測試客戶端的測試,服務器代碼也做了一些細微的調整,修改如下:
#include "pthread_pool.h"
extern std::vector<struct epoll_event> events;
extern int epollfd;
typedef std::vector<struct WorkNode> WorkList;
WorkList waitlist;//work list: write jobs queued for the write workers
typedef std::vector<int> ReadList;
ReadList readlist; //read list,[events id]: epoll event indices awaiting a read worker
pthread_cond_t has_read = PTHREAD_COND_INITIALIZER; //signalled when readlist gains an entry
pthread_cond_t has_write = PTHREAD_COND_INITIALIZER; //signalled when waitlist gains an entry
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; //guards readlist in set_readList
pthread_mutex_t lock2 = PTHREAD_MUTEX_INITIALIZER; //[change 1] serializes the read workers
pthread_mutex_t lock3 = PTHREAD_MUTEX_INITIALIZER; //[change 2] guards waitlist in set_workList
pthread_mutex_t lock4 = PTHREAD_MUTEX_INITIALIZER; //[change 3] serializes the write workers
pthread_t nrTid[4]; //read-worker thread ids; 4 is the creation cap
pthread_t nwTid[4]; //write-worker thread ids
int prhread_read_num = 0;
int prhread_write_num = 0;
// Queue epoll-event index i for the read workers and wake one of them.
// Push and signal both happen under `lock`, so a waiter holding the same
// mutex cannot miss the wakeup.
void set_readList(int i)
{
pthread_mutex_lock(&lock);
readlist.push_back(i);
pthread_cond_signal(&has_read);
pthread_mutex_unlock(&lock);
}
// Pop and return the oldest queued epoll-event index.
// NOTE(review): unlike set_readList this takes no lock and performs no
// empty check — front()/erase() on an empty vector is undefined
// behavior. It relies on the caller (read_worker) having verified the
// list is non-empty, but read_worker holds lock2 while set_readList
// uses `lock`, so the two do not actually exclude each other; confirm
// the intended locking protocol.
int get_readList()
{
int eventId;
eventId = readlist.front();
readlist.erase(readlist.begin());
return eventId;
}
// Queue a write job (destination fd + payload copy) for the write
// workers and wake one of them.
// NOTE(review): the matching pthread_cond_wait in write_worker waits on
// `lock` while holding lock4, not lock3 — the condvar is not paired
// with a single mutex, so a signal sent here can be missed; verify.
void set_workList(int connfd, char* wBuf)
{
WorkNode writeWork;
pthread_mutex_lock(&lock3); //[change 4: guard the work list with lock3]
writeWork.connfd = connfd;
strcpy(writeWork.wBuf,wBuf); // NOTE(review): unbounded strcpy — wBuf must fit WorkNode::wBuf; confirm sizes
waitlist.push_back(writeWork);
pthread_cond_signal(&has_write); //[change 5: signal while still holding the lock]
pthread_mutex_unlock(&lock3); //[change 6: release lock3]
}
// Pop and return the oldest queued write job, copying its fields out
// before erasing the entry.
// NOTE(review): no lock and no empty check — front() on an empty
// waitlist is undefined behavior; relies entirely on the caller's
// locking (write_worker holds lock4, while set_workList uses lock3).
WorkNode get_workList()
{
WorkNode writeWork;
writeWork.connfd = waitlist.front().connfd;
strcpy(writeWork.wBuf ,waitlist.front().wBuf);
waitlist.erase(waitlist.begin());
return writeWork;
}
/*
 * Spawn pthreadNum read-worker threads, recording their ids in nrTid
 * and the count in prhread_read_num for the later joins.
 * Returns 0 on success; exits the process if any creation fails.
 *
 * Fixes: the accumulator `error` was summed and returned without ever
 * being initialized (undefined behavior), and the variable-length
 * array err[pthreadNum] is not standard C++.
 *
 * NOTE(review): nrTid has only 4 slots — confirm callers never pass
 * pthreadNum > 4, or the array is overrun.
 */
int make_read_worker(int pthreadNum)
{
    int error = 0; /* stays 0: any non-zero create result exits below */
    prhread_read_num = pthreadNum;
    for (int i = 0; i < pthreadNum; i++)
    {
        int err = pthread_create(&nrTid[i], NULL, read_worker, NULL);
        if (err != 0)
        {
            std::cout << "make pthread error :" << err << std::endl;
            exit(1);
        }
        error += err;
        std::cout << "\033[32mNO.\033[0m"<< i+1 << "\033[32m, pthread creation successful!\033[0m" << std::endl;
    }
    return error;
}
/*
 * Read-worker thread body: waits for event indices queued by
 * set_readList, drains the corresponding socket, re-arms it for
 * EPOLLOUT, and hands the payload to the write workers.
 *
 * NOTE(review): pthread_cond_wait(&has_read, &lock) is called while
 * this thread holds lock2 but NOT `lock` — POSIX requires the mutex to
 * be locked by the caller, so this is undefined behavior, and the
 * signaller (set_readList) uses `lock` while readlist is consumed
 * under lock2. The empty-check is an `if`, not a `while`, so a
 * spurious wakeup can still reach get_readList() on an empty list.
 * buf is also printed without NUL termination after read().
 */
void *read_worker(void *arg)
{
pthread_t tid;
tid = pthread_self(); //get pthread id
int eventsID; //index into the epoll events vector
int connfd, n, nread;
struct epoll_event epfd;
char buf[100];
std::cout << "???? I am READ worker ????" << tid << std::endl;
while(1)
{
n = 0;
pthread_mutex_lock(&lock2); //[change 7: serialize read workers with lock2]
if (readlist.size() < 1) //[change 8: guard against a stray pthread_cond_wait wakeup]
{
pthread_cond_wait(&has_read, &lock);
}
eventsID = get_readList();
connfd = events[eventsID].data.fd;
//connfd = get_readList();
while((nread = read(connfd, buf, 100)) > 0) //read to over
{
n += nread;
}
if (n > 0)
{
std::cout << tid <<"::"<< connfd <<" Date: ["<< buf <<"]" << "events "<< eventsID << std::endl;
epfd.data.fd = connfd;
epfd.events = events[eventsID].events | EPOLLOUT;
if(epoll_ctl(epollfd, EPOLL_CTL_MOD, connfd, &epfd) == -1)
{
std::cout << "epoll_ctl return -1"<< std::endl;
exit(1);
}
set_workList(connfd, buf);
//usleep(100);
}else if (nread == 0)
{
// peer closed the connection: drop it from the epoll set
std::cout << connfd << "is go" << std::endl;
close(connfd);
epfd = events[eventsID];
epoll_ctl(epollfd, EPOLL_CTL_DEL, connfd, &epfd);
}else{ //[change 9: handle the nread < 0 case]
if(errno != EAGAIN)
{
std::cout <<" eventsID:"<< eventsID << "connfd:"<< connfd << std::endl;
perror("read:");
}
}
pthread_mutex_unlock(&lock2);//[change 10: release lock2]
}
}
/*
 * Spawn pthreadNum write-worker threads, recording their ids in nwTid
 * and the count in prhread_write_num for the later joins.
 * Returns 0 on success; exits the process if any creation fails.
 *
 * Fixes: same defects as make_read_worker — `error` was returned
 * without initialization (undefined behavior) and err[pthreadNum] was
 * a non-standard variable-length array.
 *
 * NOTE(review): nwTid has only 4 slots — confirm callers never pass
 * pthreadNum > 4.
 */
int make_write_worker(int pthreadNum)
{
    int error = 0; /* stays 0: any non-zero create result exits below */
    prhread_write_num = pthreadNum;
    for (int i = 0; i < pthreadNum; i++)
    {
        int err = pthread_create(&nwTid[i], NULL, write_worker, NULL);
        if (err != 0)
        {
            std::cout << "make pthread error :" << err << std::endl;
            exit(1);
        }
        error += err;
        std::cout << "\033[32mNO.\033[0m"<< i+1 << "\033[32m, pthread creation successful!\033[0m" << std::endl;
    }
    return error;
}
/*
 * Write-worker thread body: waits for jobs queued by set_workList and
 * forwards each payload to the dedicated "read client".
 *
 * NOTE(review): pthread_cond_wait(&has_write, &lock) runs while this
 * thread holds lock4, not `lock`, and the signaller holds lock3 —
 * undefined behavior per POSIX and wakeups can be missed. The
 * empty-check is an `if`, not a `while`. write()'s return is never
 * checked for -1: a failed write makes `n -= nwrite` grow and the loop
 * spin forever.
 */
void *write_worker(void *arg)
{
pthread_t tid;
tid = pthread_self(); //get pthread id
WorkNode wJob;
int connfd, n, nwrite;
char buf[100];
std::cout << "???? I am WRITE worker ????" << tid<< std::endl;
while(1)
{
pthread_mutex_lock(&lock4);//[change 11: serialize write workers with lock4]
if (waitlist.size() < 1)//[change 12: guard against stray wakeups]
{
pthread_cond_wait(&has_write, &lock);
}
wJob = get_workList();
//connfd = wJob.connfd;
connfd = 5; //[change 13: relay everything to the first connected client — assumes its fd is 5; TODO confirm]
strcpy(buf, wJob.wBuf);
n = size_buf(buf); // length including the trailing '\0', which is sent too
while(n > 0)//write ot over
{
nwrite = write(connfd, buf, n);
n -= nwrite;
}
//usleep(100);
pthread_mutex_unlock(&lock4); //[change 14: release lock4]
}
}
/*
 * Block until every read worker, then every write worker, has exited.
 * Always returns 0.
 */
int destroy_pthread()
{
    int idx;
    for (idx = 0; idx < prhread_read_num; ++idx)
        pthread_join(nrTid[idx], NULL);
    for (idx = 0; idx < prhread_write_num; ++idx)
        pthread_join(nwTid[idx], NULL);
    return 0;
}
/*
 * Length of the C string in buf INCLUDING the trailing '\0'
 * (i.e. strlen(buf) + 1) — the write workers transmit the terminator
 * along with the text.
 */
int size_buf(char *buf)
{
    int len = 0;
    while (buf[len] != '\0')
    {
        ++len;
    }
    return len + 1;
}
```