Overview of a live555 Multi-threading Implementation

EasyRTSPServer is a streaming media server built on top of live555. live555 is inherently single-threaded, which is efficient enough on an IP camera, where the number of connections is small; this has been verified in EasyIPCamera. When it is used in an NVR or a streaming media server, however, performance considerations make a multi-threaded redesign necessary.

This is a fairly difficult change. The multi-threading work is concentrated in GenericMediaServer, and each worker thread's UsageEnvironment has to be passed down into every sub-module, instead of simply calling the single-threaded envir(); see the code and comments below, as well as the short sketch that follows.
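For illustration, here is a minimal sketch of that pattern. ChannelSource and its members are hypothetical names (not part of live555 or EasyRTSPServer): the sub-module receives the worker thread's UsageEnvironment when it is created and performs all of its scheduling and logging through that environment.

#include "UsageEnvironment.hh"

// Hypothetical sub-module: it is handed the worker thread's UsageEnvironment at
// construction and never touches the server-wide, single-threaded envir().
class ChannelSource {
public:
  explicit ChannelSource(UsageEnvironment& workerEnv) : fEnv(workerEnv) {}

  void start() {
    // Delayed tasks are queued on the scheduler owned by this worker thread:
    fEnv.taskScheduler().scheduleDelayedTask(10000 /*us*/, onTick, this);
  }

private:
  static void onTick(void* clientData) {
    ChannelSource* self = (ChannelSource*)clientData;
    self->fEnv << "tick on worker environment\n"; // log through the worker env
    self->fEnv.taskScheduler().scheduleDelayedTask(10000, onTick, self);
  }

  UsageEnvironment& fEnv; // the per-thread environment passed in at creation
};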

The multi-threading structures are defined in GenericMediaServer. When a GenericMediaServer is constructed, the multi-thread objects are created, but the actual worker threads are not started yet; a reconstructed sketch of the structures is given first, followed by the actual constructor code.
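The definitions of MultiThread_CORE_T and LIVE_THREAD_TASK_T are not reproduced in the original post. The sketch below is inferred from the fields used by the code that follows (id, extPtr, pSubScheduler, pSubEnv, osThread, liveURLSuffix, releaseChannel, clientNum, pClientConnectionPtr); the field types, the buffer sizes and the OSTHREAD handle type are assumptions.

// Reconstructed sketch; the real definitions live in the modified GenericMediaServer.hh.
typedef struct __LIVE_THREAD_TASK_T {
	int                id;                 // worker index, assigned in the constructor
	void*              extPtr;             // back-pointer to the owning GenericMediaServer
	TaskScheduler*     pSubScheduler;      // this worker's private scheduler (select- or epoll-based)
	UsageEnvironment*  pSubEnv;            // this worker's private UsageEnvironment
	OSTHREAD*          osThread;           // OS thread handle, created lazily on first use (type assumed)
	char               liveURLSuffix[256]; // stream name served by this worker; "" means the slot is free
	unsigned char      releaseChannel;     // non-zero while the channel is being torn down
	int                clientNum;          // number of clients currently attached to this channel
	void*              pClientConnectionPtr[MAX_BATCH_CLIENT_NUM]; // clients sharing this channel
} LIVE_THREAD_TASK_T;

typedef struct __MultiThread_CORE_T {
	int                 threadNum;         // MAX_DEFAULT_MULTI_THREAD_NUM by default
	LIVE_THREAD_TASK_T* threadTask;        // array with one entry per worker
} MultiThread_CORE_T;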


GenericMediaServer
::GenericMediaServer(UsageEnvironment& env, int ourSocketV4, int ourSocketV6, Port ourPort,
		     unsigned reclamationSeconds, void *_callback, void *_userptr)
  : Medium(env),
    fServerSocket4(ourSocketV4), fServerSocket6(ourSocketV6), 
	fServerPort(ourPort), fReclamationSeconds(reclamationSeconds),
    fServerMediaSessions(HashTable::create(STRING_HASH_KEYS)),
    fClientConnections(HashTable::create(ONE_WORD_HASH_KEYS)),
    fClientSessions(HashTable::create(STRING_HASH_KEYS)) {
  ignoreSigPipeOnSocket(fServerSocket4); // so that clients on the same host that are killed don't also kill us

  if (fServerSocket6 > 0)
  {
	ignoreSigPipeOnSocket(fServerSocket6); // so that clients on the same host that are killed don't also kill us
  }


#ifdef LIVE_MULTI_THREAD_ENABLE

  InitMutex(&mutexServerMediaSession);
  InitMutex(&mutexClientConnection);
  InitMutex(&mutexClientSession);
  
  mCallbackPtr	=	_callback;
  mUserPtr		=	_userptr;
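  // Set up one LIVE_THREAD_TASK_T per worker: each gets its own TaskScheduler and
  // UsageEnvironment here, but the OS thread itself is only created later, on the
  // first request that is dispatched to this slot (see GetEnvBySuffix below).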

  memset(&multiThreadCore, 0x00, sizeof(MultiThread_CORE_T));
  multiThreadCore.threadNum = MAX_DEFAULT_MULTI_THREAD_NUM;
  multiThreadCore.threadTask = new LIVE_THREAD_TASK_T[multiThreadCore.threadNum];
  memset(&multiThreadCore.threadTask[0], 0x00, sizeof(LIVE_THREAD_TASK_T) * multiThreadCore.threadNum);
  for (int i=0; i<multiThreadCore.threadNum; i++)
  {
	  char szName[36] = {0};
	  sprintf(szName, "worker thread %d", i);
	  multiThreadCore.threadTask[i].id = i;
	  multiThreadCore.threadTask[i].extPtr = this;
#ifdef _EPOLL_
	  multiThreadCore.threadTask[i].pSubScheduler = BasicTaskSchedulerEpoll::createNew(i+1, MAX_EPOLL_WORKER_THREAD_EVENT);
#else
	  multiThreadCore.threadTask[i].pSubScheduler = BasicTaskScheduler::createNew(i+1, MAX_EPOLL_WORKER_THREAD_EVENT);
#endif
	  multiThreadCore.threadTask[i].pSubEnv = BasicUsageEnvironment::createNew(*multiThreadCore.threadTask[i].pSubScheduler, i+1, szName);
  }
#endif

  // Arrange to handle connections from others:
  env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket4, incomingConnectionHandler4, this);
  if (fServerSocket6 > 0)
  {
	env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket6, incomingConnectionHandler6, this);
  }
}

After a DESCRIBE request arrives, the corresponding worker thread is selected (and created on first use) according to the suffix in the URL. The code is as follows:

UsageEnvironment *GenericMediaServer::GetEnvBySuffix(UsageEnvironment *pMainThreadEnv, const char *urlSuffix, void *pClientConnection, 
										LIVE_THREAD_TASK_T **pThreadTask, Boolean bLockServerMediaSession)
{
	GenericMediaServer::ClientConnection	*pClient = (GenericMediaServer::ClientConnection *)pClientConnection;

	int iFreeIdx = -1;
	UsageEnvironment *pEnv = NULL;

	if ( (int)strlen(urlSuffix) < 1)
	{
		return NULL;
	}

	char streamName[512] = {0};

	int		iProcRet = 0;
	Boolean bRequestTooMany = False;
	if (bLockServerMediaSession)		LockServerMediaSession(pMainThreadEnv->GetEnvirName(), (char*)"GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);

	do
	{
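		// Scan all worker slots: remember the first free slot (liveURLSuffix empty and
		// not being released), and check whether this URL suffix is already being served.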
		for (int i=0; i<multiThreadCore.threadNum; i++)
		{
			if ( (iFreeIdx<0) && (((int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) < 1 )) && (multiThreadCore.threadTask[i].releaseChannel==0x00) ) 
			{
				iFreeIdx = i;
			}
			if ( 0 == strcmp(urlSuffix, multiThreadCore.threadTask[i].liveURLSuffix))
			{
				if (multiThreadCore.threadTask[i].releaseChannel>0x00)
				{
					iProcRet = -1;
					_TRACE(TRACE_LOG_DEBUG, (char *)"[%s] This channel is currently being deleted, please retry later: %s\n", multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(), urlSuffix);
					break;
				}

				if (NULL == multiThreadCore.threadTask[i].pSubEnv)
				{
					iProcRet = -2;
					break;
				}

				if (multiThreadCore.threadTask[i].pSubEnv->GetStreamStatus() == 0x00)
				{
					iProcRet = -3;
					break;
				}


				multiThreadCore.threadTask[i].pSubEnv->LockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
				if (multiThreadCore.threadTask[i].pSubEnv->GetLockFlag() != 0x00)
				{
					iProcRet = -4;
					multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
					break;
				}
				
				bool assignEnv = false;
				for (int k=0; k<MAX_BATCH_CLIENT_NUM; k++)
				{
					if (NULL == multiThreadCore.threadTask[i].pClientConnectionPtr[k])
					{
						assignEnv = true;
						multiThreadCore.threadTask[i].pClientConnectionPtr[k] = pClient;

						_TRACE(TRACE_LOG_INFO, (char*)"GenericMediaServer::GetEnvBySuffix  [%s] set [%d] to Index[%d]\n", urlSuffix, pClient->fOurSocket, k);

						strcpy(streamName, urlSuffix);

						break;
					}
				}

				if (assignEnv)
				{
					pEnv = multiThreadCore.threadTask[i].pSubEnv;
					//multiThreadCore.threadTask[i].subSocket = pClient->fOurSocket;
					pClient->pClientConnectionEnv = multiThreadCore.threadTask[i].pSubEnv;

					//multiThreadCore.threadTask[i].handleDescribe = 0x01;
					//*handleDescribe = &multiThreadCore.threadTask[i].handleDescribe;
					if (NULL != pThreadTask)	*pThreadTask = &multiThreadCore.threadTask[i];

					multiThreadCore.threadTask[i].clientNum ++;

					pEnv->IncrementReferenceCount();		// increment the reference count
					//_TRACE(TRACE_LOG_WARNING, (char*)"######## pEnv->IncrementReferenceCount reference count is now [%d].\n", pEnv->GetReferenceCount());

					iProcRet = 0;

					_TRACE(TRACE_LOG_INFO, (char*)"Shared channel  GenericMediaServer::GetEnvBySuffix: Channel already exists. New Connection[%d]   [%s][%s] ClientNum[%d]\n",
									pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(), urlSuffix, 
									multiThreadCore.threadTask[i].clientNum);
				}
				else
				{
					// No free slot was found for this client, which means the channel's client list is full

					iProcRet = -10;
					_TRACE(TRACE_LOG_ERROR, (char*)"GenericMediaServer::GetEnvBySuffix The current channel's client list is full [%s]\n", urlSuffix);
				}

				multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
				break;
			}
		}
		if (pEnv)			break;
		if (iFreeIdx<0)		break;

		if (iProcRet < 0)	break;
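		// No existing channel matched: take the free slot, start its OS thread on first
		// use, and bind this client connection to that worker's UsageEnvironment.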

		if (NULL == multiThreadCore.threadTask[iFreeIdx].osThread)
		{
			CreateOSThread( &multiThreadCore.threadTask[iFreeIdx].osThread, __WorkerThread_Proc, (void *)&multiThreadCore.threadTask[iFreeIdx] );
		}

		
		multiThreadCore.threadTask[iFreeIdx].pClientConnectionPtr[0] = pClient;
		//multiThreadCore.threadTask[iFreeIdx].subSocket = pClient->fOurSocket;
		pClient->pClientConnectionEnv = multiThreadCore.threadTask[iFreeIdx].pSubEnv;

		pEnv = pClient->pClientConnectionEnv;

		strcpy(multiThreadCore.threadTask[iFreeIdx].liveURLSuffix, urlSuffix);

		strcpy(streamName, multiThreadCore.threadTask[iFreeIdx].liveURLSuffix);

		pEnv->IncrementReferenceCount();		// increment the reference count
		//_TRACE(TRACE_LOG_WARNING, (char*)"######## pEnv->IncrementReferenceCount initial reference count is %d.\n", pEnv->GetReferenceCount());

		//envir().taskScheduler().disableBackgroundHandling(pClient->fOurSocket);
		//pClient->pEnv->taskScheduler().turnOnBackgroundReadHandling(pClient->fOurSocket,  (TaskScheduler::BackgroundHandlerProc*)&GenericMediaServer::ClientConnection::incomingRequestHandler, this);

		//multiThreadCore.threadTask[iFreeIdx].handleDescribe = 0x01;

		//*handleDescribe = &multiThreadCore.threadTask[iFreeIdx].handleDescribe;
		if (NULL != pThreadTask)	*pThreadTask = &multiThreadCore.threadTask[iFreeIdx];

		multiThreadCore.threadTask[iFreeIdx].clientNum ++;

		_TRACE(TRACE_LOG_INFO, (char*)"New channel  GenericMediaServer::GetEnvBySuffix New Connection[%d] [%s][%s] ClientNum[%d]\n",
						pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(), 
						multiThreadCore.threadTask[iFreeIdx].liveURLSuffix, 
						multiThreadCore.threadTask[iFreeIdx].clientNum);
	}while (0);

	if (bLockServerMediaSession)		UnlockServerMediaSession(pMainThreadEnv->GetEnvirName(), "GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);

	return pEnv;
}
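The body of __WorkerThread_Proc is not shown above. Because every task owns a private scheduler and environment, a plausible sketch is simply to run that scheduler's event loop; the exact signature is an assumption.

// Plausible sketch of the worker thread body (not shown in the post): each worker
// runs the event loop of its own private scheduler, so sockets and delayed tasks
// registered on one worker's UsageEnvironment never contend with another thread's.
void* __WorkerThread_Proc(void* lpParam)
{
	LIVE_THREAD_TASK_T* pTask = (LIVE_THREAD_TASK_T*)lpParam;
	pTask->pSubEnv->taskScheduler().doEventLoop(); // loops forever; worker threads are kept for reuse
	return NULL;
}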

In this way, each distinct stream request is dispatched to its own worker thread, and the threads do not interfere with one another.
Once all of the clients attached to a channel have disconnected, the channel's resources are reclaimed, but the corresponding thread is not destroyed: it is kept alive both so that it can be reused and for the sake of stability.
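The release path is not shown in the post either. Under the structures sketched earlier, the teardown could look roughly like the sketch below; ReleaseChannelSlot is an illustrative name, and the ordering of steps is an assumption consistent with the checks made in GetEnvBySuffix.

// Illustrative sketch of reclaiming a channel once its last client has gone away.
// Only the channel slot is reset; the worker thread and its UsageEnvironment stay
// alive so the slot can be reused by the next stream, which also avoids tearing
// down a thread that is still inside its event loop.
void ReleaseChannelSlot(LIVE_THREAD_TASK_T* pTask)
{
	pTask->releaseChannel = 0x01;                 // block new clients while tearing down

	// ... close the ServerMediaSession and per-client state on pTask->pSubEnv ...

	memset(pTask->pClientConnectionPtr, 0x00, sizeof(pTask->pClientConnectionPtr));
	pTask->clientNum = 0;
	memset(pTask->liveURLSuffix, 0x00, sizeof(pTask->liveURLSuffix)); // mark the slot free again

	pTask->releaseChannel = 0x00;                 // slot can now be picked up by GetEnvBySuffix
	// NOTE: pTask->osThread and pTask->pSubEnv are intentionally NOT destroyed.
}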
