In an earlier post we described how to convert the single-threaded server into a multi-threaded one. Here we add a further note on the efficiency of the multi-threaded design.
In GenericMediaServer.h, declare the MultiThread_CORE_T structure as follows:
#define MAX_BATCH_CLIENT_NUM 5
typedef struct __LIVE_THREAD_TASK_T
{
    int id;
    TaskScheduler *pSubScheduler;
    UsageEnvironment *pSubEnv;
    char liveURLSuffix[512];
    int releaseChannel;                               // channel-release flag
    int handleDescribe;
    OSTHREAD_OBJ_T *osThread;                         // thread object
    int clientNum;
    void *pClientConnectionPtr[MAX_BATCH_CLIENT_NUM];
    void *procPtr;
    void *extPtr;
} LIVE_THREAD_TASK_T;

#define MAX_DEFAULT_MULTI_THREAD_NUM 256              // maximum number of channels supported
typedef struct __MultiThread_CORE_T
{
    int threadNum;
    LIVE_THREAD_TASK_T *threadTask;
} MultiThread_CORE_T;
In the GenericMediaServer constructor, only the MultiThread_CORE_T table itself is created, i.e. the 256 LIVE_THREAD_TASK_T slots; the actual threads are not created there.
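A minimal sketch of that constructor-side setup, under two assumptions: the table is zero-initialized, and each slot's scheduler/environment pair is built eagerly (BasicTaskScheduler/BasicUsageEnvironment stand in here for whatever extended environment classes the patch actually uses):

// Sketch (assumption): inside the GenericMediaServer constructor.
// Allocate the fixed 256-slot task table; no OS thread is started here --
// threads are created on demand in GetEnvBySuffix().
multiThreadCore.threadNum = MAX_DEFAULT_MULTI_THREAD_NUM;
multiThreadCore.threadTask = new LIVE_THREAD_TASK_T[multiThreadCore.threadNum];
memset(multiThreadCore.threadTask, 0, sizeof(LIVE_THREAD_TASK_T) * multiThreadCore.threadNum);
for (int i = 0; i < multiThreadCore.threadNum; i++)
{
    multiThreadCore.threadTask[i].id = i;  // slot index doubles as the channel id
    multiThreadCore.threadTask[i].pSubScheduler = BasicTaskScheduler::createNew();
    multiThreadCore.threadTask[i].pSubEnv = BasicUsageEnvironment::createNew(*multiThreadCore.threadTask[i].pSubScheduler);
    multiThreadCore.threadTask[i].osThread = NULL;  // worker thread is created lazily
}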
When handling a client's DESCRIBE request, the server first checks whether the requested resource is already in the channel list; only if it is not does it create the corresponding worker thread, as follows:
// If we are on the main thread, go through the channel-lookup path
if (pEnv->GetEnvirId() == MAIN_THREAD_ID)
{
    UsageEnvironment *pChEnv = fOurServer.GetEnvBySuffix(pEnv, urlTotalSuffix, this, pThreadTask, True);
    if (NULL == pChEnv)
    {
        handleCmdRet = -1;
        this->pClientConnectionEnv = NULL;
        handleCmd_notFound();
        break;
    }
    else
    {
        _TRACE(TRACE_LOG_DEBUG, (char*)"[%s]Set socket[%d] Assign to [%d:%s]\n", pEnv->GetEnvirName(), this->fOurSocket, pChEnv->GetEnvirId(), pChEnv->GetEnvirName());
        // Move the socket out of the main thread; the worker thread takes it over
        pEnv->taskScheduler().disableBackgroundHandling(fOurSocket);
        return MAIN_THREAD_ID;
    }
    break;
}
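Note that disableBackgroundHandling() only detaches the socket from the main thread's scheduler. Before the worker thread can serve the connection, the socket has to be registered with that worker's own scheduler. A minimal sketch of the worker-side hand-off (the helper name is an assumption; setBackgroundHandling() and the incomingRequestHandler callback are stock live555):

// Sketch (assumption): runs on the worker thread once the client has been
// assigned to a LIVE_THREAD_TASK_T slot.  Registers the socket with the
// worker's own scheduler, so all further RTSP requests on this connection
// are dispatched inside the worker's event loop.
static void AdoptClientConnection(LIVE_THREAD_TASK_T *pTask,
                                  GenericMediaServer::ClientConnection *pClient)
{
    pTask->pSubEnv->taskScheduler().setBackgroundHandling(
        pClient->fOurSocket,
        SOCKET_READABLE | SOCKET_EXCEPTION,
        GenericMediaServer::ClientConnection::incomingRequestHandler,
        pClient);
}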
The main thread's key call is GenericMediaServer::GetEnvBySuffix, which implements the main thread's task assignment, as follows:
UsageEnvironment *GenericMediaServer::GetEnvBySuffix(UsageEnvironment *pMainThreadEnv, const char *urlSuffix, void *pClientConnection,
                                                     LIVE_THREAD_TASK_T **pThreadTask, Boolean bLockServerMediaSession)
{
    GenericMediaServer::ClientConnection *pClient = (GenericMediaServer::ClientConnection *)pClientConnection;
    int iFreeIdx = -1;
    UsageEnvironment *pEnv = NULL;
    if ((int)strlen(urlSuffix) < 1)
    {
        return NULL;
    }
    char streamName[512] = {0};
    int iProcRet = 0;
    Boolean bRequestTooMany = False;
    if (bLockServerMediaSession) LockServerMediaSession(pMainThreadEnv->GetEnvirName(), (char*)"GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
    do
    {
        for (int i=0; i<multiThreadCore.threadNum; i++)
        {
            // Remember the first free slot in case a new channel has to be created
            if ((iFreeIdx<0) && ((int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) < 1) && (multiThreadCore.threadTask[i].releaseChannel==0x00))
            {
                iFreeIdx = i;
            }
            if (0 == strcmp(urlSuffix, multiThreadCore.threadTask[i].liveURLSuffix))
            {
                if (multiThreadCore.threadTask[i].releaseChannel>0x00)
                {
                    iProcRet = -1;
                    _TRACE(TRACE_LOG_DEBUG, (char *)"[%s] Channel is being deleted; please retry later: %s\n", multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(), urlSuffix);
                    break;
                }
                if (NULL == multiThreadCore.threadTask[i].pSubEnv)
                {
                    iProcRet = -2;
                    break;
                }
                if (multiThreadCore.threadTask[i].pSubEnv->GetStreamStatus() == 0x00)
                {
                    iProcRet = -3;
                    break;
                }
                multiThreadCore.threadTask[i].pSubEnv->LockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
                if (multiThreadCore.threadTask[i].pSubEnv->GetLockFlag() != 0x00)
                {
                    iProcRet = -4;
                    multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
                    break;
                }
                bool assignEnv = false;
                for (int k=0; k<MAX_BATCH_CLIENT_NUM; k++)
                {
                    if (NULL == multiThreadCore.threadTask[i].pClientConnectionPtr[k])
                    {
                        assignEnv = true;
                        multiThreadCore.threadTask[i].pClientConnectionPtr[k] = pClient;
                        _TRACE(TRACE_LOG_INFO, (char*)"GenericMediaServer::GetEnvBySuffix [%s] set [%d] to Index[%d]\n", urlSuffix, pClient->fOurSocket, k);
                        strcpy(streamName, urlSuffix);
                        break;
                    }
                }
                if (assignEnv)
                {
                    pEnv = multiThreadCore.threadTask[i].pSubEnv;
                    //multiThreadCore.threadTask[i].subSocket = pClient->fOurSocket;
                    pClient->pClientConnectionEnv = multiThreadCore.threadTask[i].pSubEnv;
                    //multiThreadCore.threadTask[i].handleDescribe = 0x01;
                    //*handleDescribe = &multiThreadCore.threadTask[i].handleDescribe;
                    if (NULL != pThreadTask) *pThreadTask = &multiThreadCore.threadTask[i];
                    multiThreadCore.threadTask[i].clientNum++;
                    pEnv->IncrementReferenceCount();  // increment the reference count
                    iProcRet = 0;
                    _TRACE(TRACE_LOG_INFO, (char*)"Shared channel GenericMediaServer::GetEnvBySuffix:: Channel already exists. New Connection[%d] [%s][%s] ClientNum[%d]\n",
                           pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(), urlSuffix,
                           multiThreadCore.threadTask[i].clientNum);
                }
                else
                {
                    // No free client slot found: this channel's client list is full
                    iProcRet = -10;
                    _TRACE(TRACE_LOG_ERROR, (char*)"GenericMediaServer::GetEnvBySuffix channel client list is full [%s]\n", urlSuffix);
                }
                multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
                break;
            }
        }
        if (pEnv) break;        // joined an existing channel
        if (iFreeIdx<0) break;  // no free slot: all channels are in use
        if (iProcRet < 0) break;
        // The channel does not exist yet: create its worker thread on demand
        if (NULL == multiThreadCore.threadTask[iFreeIdx].osThread)
        {
            CreateOSThread(&multiThreadCore.threadTask[iFreeIdx].osThread, __WorkerThread_Proc, (void *)&multiThreadCore.threadTask[iFreeIdx]);
        }
        multiThreadCore.threadTask[iFreeIdx].pClientConnectionPtr[0] = pClient;
        pClient->pClientConnectionEnv = multiThreadCore.threadTask[iFreeIdx].pSubEnv;
#ifdef _DEBUG
        // Dump the current channel list
        for (int i=0; i<multiThreadCore.threadNum; i++)
        {
            if ((int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) > 0)
            {
                _TRACE(TRACE_LOG_DEBUG, (char *)"Channel list [%d:%s]: %s\n", i, multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(), multiThreadCore.threadTask[i].liveURLSuffix);
            }
        }
#endif
        pEnv = pClient->pClientConnectionEnv;
        strcpy(multiThreadCore.threadTask[iFreeIdx].liveURLSuffix, urlSuffix);
        strcpy(streamName, multiThreadCore.threadTask[iFreeIdx].liveURLSuffix);
        pEnv->IncrementReferenceCount();  // increment the reference count
        if (NULL != pThreadTask) *pThreadTask = &multiThreadCore.threadTask[iFreeIdx];
        multiThreadCore.threadTask[iFreeIdx].clientNum++;
        _TRACE(TRACE_LOG_INFO, (char*)"New channel GenericMediaServer::GetEnvBySuffix New Connection[%d] [%s][%s] ClientNum[%d]\n",
               pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(),
               multiThreadCore.threadTask[iFreeIdx].liveURLSuffix,
               multiThreadCore.threadTask[iFreeIdx].clientNum);
    } while (0);
    if (bLockServerMediaSession) UnlockServerMediaSession(pMainThreadEnv->GetEnvirName(), "GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
    //UnlockClientConnection();
    if (NULL != pEnv)
    {
        if ((int)strlen(streamName) < 1)
        {
            _TRACE(TRACE_LOG_DEBUG, (char *)"#### ERROR\n");
        }
    }
    return pEnv;
}
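One detail worth noting in the listing: every client that joins a channel bumps clientNum and the environment's reference count, and the releaseChannel flag makes GetEnvBySuffix refuse new clients while a teardown is pending (the iProcRet = -1 branch). The matching disconnect path is not shown in the patch excerpts; a hedged sketch of what it might look like (the function name and DecrementReferenceCount are assumptions):

// Sketch (assumption): bookkeeping when a client connection closes.
// Mirrors the clientNum++/IncrementReferenceCount() pairing done in
// GetEnvBySuffix(); assumed to run with the ServerMediaSession lock held.
void GenericMediaServer::ReleaseClientFromTask(LIVE_THREAD_TASK_T *pTask, void *pClient)
{
    for (int k = 0; k < MAX_BATCH_CLIENT_NUM; k++)
    {
        if (pTask->pClientConnectionPtr[k] == pClient)
        {
            pTask->pClientConnectionPtr[k] = NULL;
            pTask->clientNum--;
            pTask->pSubEnv->DecrementReferenceCount();  // assumed counterpart
            break;
        }
    }
    if (pTask->clientNum <= 0)
    {
        // Last client gone: flag the channel; the worker thread performs
        // the actual teardown and then clears liveURLSuffix for reuse.
        pTask->releaseChannel = 0x01;
    }
}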
With GetEnvBySuffix done, the main thread's work for the current client is finished; everything that remains falls to the worker thread. Once created, a worker thread keeps checking whether it has tasks to process, and whenever a new client is assigned to it, handling starts again from handleCmd_DESCRIBE.
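What that worker loop might look like: a rough sketch assuming a pthread-style thread procedure and a simple polling task for the releaseChannel flag. Only CreateOSThread, __WorkerThread_Proc and the LIVE_THREAD_TASK_T fields come from the code above; the rest is illustrative:

// Sketch (assumption): periodic task run inside the worker's event loop.
// Polling is an assumption; the real patch may use live555 event triggers.
static void CheckReleaseTask(void *clientData)
{
    LIVE_THREAD_TASK_T *pTask = (LIVE_THREAD_TASK_T *)clientData;
    if (pTask->releaseChannel > 0x00)
    {
        // Tear the channel down, then clear the slot so it can be reused.
        pTask->liveURLSuffix[0] = '\0';
        pTask->clientNum = 0;
        pTask->releaseChannel = 0x00;
    }
    // Poll again in 100 ms.
    pTask->pSubEnv->taskScheduler().scheduleDelayedTask(100000, CheckReleaseTask, pTask);
}

// Sketch (assumption): per-slot worker thread entry point.  The thread is
// created once per slot and then reused (GetEnvBySuffix only calls
// CreateOSThread when osThread is NULL), so its event loop runs indefinitely.
void *__WorkerThread_Proc(void *arg)
{
    LIVE_THREAD_TASK_T *pTask = (LIVE_THREAD_TASK_T *)arg;
    // Keep watching for release requests on this channel...
    pTask->pSubEnv->taskScheduler().scheduleDelayedTask(0, CheckReleaseTask, pTask);
    // ...and dispatch every socket registered with this channel's scheduler.
    // Clients moved here by the main thread resume at handleCmd_DESCRIBE.
    pTask->pSubEnv->taskScheduler().doEventLoop();
    return NULL;
}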
Summary
The hand-off point between live555's worker threads and the main thread is RTSPServer::RTSPClientConnection::handleCmd_DESCRIBE: the main thread processes a connection only up to the DESCRIBE command, and everything after that is done by the worker thread.