C++ 委託

最近看了看C#委託,突然想到C++有沒有委託呢,一查之下,原來是有的,參考了很多人的博客,自己也整理了下,寫下來,做個筆記。

委託的作用

我想理解一項知識,最關鍵的是理解這是幹什麼的,同樣,委託有什麼作用呢?我查了查網上的資料,各有各的說法。我的理解主要是以下四點:
  1. 實現策略模式,隔離變化
  2. 實現觀察者模式,也就是multicast delegate 
  3. 解耦合,這個有點像面向接口的編程(依賴倒置)
  4. 實現異步任務。
上面只是應用,具體到委託的實現方式,我目前只實現了同步委託(包含前三點),和異步委託(實現異步任務)。

同步委託

同步委託說白了就是對函數指針的封裝,multicast delegate添加了一個簡單的觀察者模式,下面的代碼簡單描述了這種思想:

// Base class for a multicast delegate: it is both an invocable target and a
// container of registered observer delegates (observer pattern).
// NOTE: the list holds non-owning raw pointers; every registered delegate
// must outlive this object.
class IDelegate
{
private:
	std::vector<IDelegate *> mList;  // registered observers (not owned)

public:
	// BUG FIX: this class is used polymorphically (CDelegate derives from it
	// and is invoked through IDelegate*), so the destructor must be virtual.
	virtual ~IDelegate() {}

	// Called for each registered delegate. The base implementation is a
	// deliberate no-op so IDelegate itself can serve as a pure multicast hub.
	virtual void Invoke(WPARAM wParam, LPARAM lParam) {};

	// Register an observer delegate (mimics C#'s += subscription syntax).
	IDelegate& operator += (IDelegate & iDelegate)
	{
		mList.push_back(&iDelegate);
		return *this;
	}

	// Synchronously invoke every registered delegate, in registration order.
	virtual void Signal(WPARAM wParam, LPARAM lParam)
	{
		for (IDelegate *pDelegate : mList)
		{
			pDelegate->Invoke(wParam, lParam);
		}
	}
};

// Delegate bound to a member function of T on a specific target instance
// (the C++ equivalent of a C# instance-method delegate).
template<typename T>
class CDelegate : public IDelegate
{
	typedef void(T::* PFunc)(WPARAM wParam, LPARAM lParam);
private:
	T * m_pThis;    // target object (not owned; must outlive this delegate)
	PFunc m_pFunc;  // member function invoked on m_pThis

public:
	CDelegate(T *pThis, PFunc pFunc)
		: m_pThis(pThis)
		, m_pFunc(pFunc)
	{
	}

	// Forward the multicast call to the bound member function.
	virtual void Invoke(WPARAM wParam, LPARAM lParam) override
	{
		(m_pThis->*m_pFunc)(wParam, lParam);
	}
};

測試代碼如下:
//
//   TEST DELEGATE
//

// Demo target class: Calc simulates a slow computation (~1s of Sleep calls)
// so the effect of worker threads is visible in the trace output.
class CA
{
public: 
	void Calc(WPARAM wParam, LPARAM lParam)
	{
		int count = 0;
		for (int i = 0; i < 1000; ++i)
		{
			count += i;
			Sleep(1);  // simulate work
		}
		// BUG FIX: the original format string labelled `count` as "lParam"
		// and never printed the real lParam argument. Print both.
		TRACE(_T("Thread[%d] invoke wParam = %d, lParam = %d, count = %d"), GetCurrentThreadId(), wParam, lParam, count);
	}
};

// Demo publisher: owns a multicast delegate that observers attach to via
// operator+= and that Notify() fires synchronously.
class CB
{	
public:

	// Multicast hub; CDelegate instances register themselves with +=.
	System::Concurrency::IDelegate mDelegate;

	// Synchronously invoke every registered observer with demo arguments.
	void Notify()
	{
		mDelegate.Signal(30202, 2020101);
	}
};

// Demo entry point exercising the synchronous (multicast) delegate.
int _tmain(int argc, _TCHAR* argv[])
{
	TRACE(_T("Thread[%d] start main"), GetCurrentThreadId());


	//
	//  Synchronous delegate
	//
	// Two targets bind the same member function; both are registered on the
	// publisher's multicast delegate and invoked by a single Signal() call.
	CA ca1; 
	System::Concurrency::CDelegate<CA> caDelegate1(&ca1, &CA::Calc);

	CA ca2; 
	System::Concurrency::CDelegate<CA> caDelegate2(&ca2, &CA::Calc);

	CB cb;
	cb.mDelegate += caDelegate1;
	cb.mDelegate += caDelegate2;
	cb.Notify();

	// BUG FIX: _tmain expands to wmain, which (unlike main) has no implicit
	// "return 0" -- return an explicit success code.
	return 0;
}

異步委託

我下面主要說說異步委託,實際上我一直很懷疑自己是不是將這個概念理解錯誤,畢竟和C#裏面的task 很相似,anyway,能用就好,能解決問題就行。異步委託實際上是通過將任務(函數調用)發送給工作線程來執行,從而避免調用線程被阻塞。比如說我們項目中經常碰到的存儲數據(數據量較小)到文件,開個單獨的線程吧,覺得浪費了,不開吧,頻繁地寫文件,導致調用線程性能下降。所以想來想去,異步委託是非常合適的一種方法。下面是我的實現:

1. 實現異步任務接口:
// Interface for a unit of work submitted to the thread pool.
// wParam/lParam carry the call arguments. hEvent is an optional completion
// event owned by the submitter; it is signaled here but never closed.
class ITask
{
public:		
	WPARAM wParam;
	LPARAM lParam;
	HANDLE hEvent;   // completion event, owned by the submitter (may be NULL)

public:
	ITask()
	{
		wParam = 0;
		lParam = 0;
		hEvent = NULL;
	}

	// BUG FIX: the worker callback deletes tasks through an ITask* pointer,
	// so the destructor must be virtual or that delete is undefined behavior.
	virtual ~ITask() {}

	virtual void Execute() = 0;
	virtual void Complete() { }; 

	// Signal the completion event, if the submitter provided one.
	virtual void Signal()
	{
		if (hEvent != NULL)
		{
			SetEvent(hEvent);
		}
	}
};

2. 實現異步委託接口,通過異步委託來發送task.
// Task that invokes a bound member function of T on a pool worker thread.
template<typename T> class CAsyncTask : public ITask
{
public:
	T *pThis;    // target object (not owned; must outlive the task)
	typedef void (T::* PFunc)(WPARAM wParam, LPARAM lParam);
	PFunc pFunc; // function pointer to method of class T

	CAsyncTask()
	{
		// BUG FIX: the original left these pointers indeterminate, making
		// the NULL checks in Execute() unreliable if a caller forgot to set
		// them.
		pThis = NULL;
		pFunc = NULL;
	}

	virtual void Execute() override
	{
		if (pThis != NULL && pFunc != NULL)
		{
			(pThis->*pFunc)(wParam, lParam);
		}

		// BUG FIX: the original also called SetEvent(hEvent) here, but the
		// pool callback runs Execute() -> Complete() -> Signal(); signaling
		// inside Execute() could wake a waiter before Complete() had run and
		// set the event twice. Signaling is left to ITask::Signal().
	}
};

template<typename T>
class CAsyncDelegate
{
	typedef void(T::* PFunc)(WPARAM wParam, LPARAM lParam);
private:
	T * m_pThis;
	PFunc m_pFunc;
	HANDLE m_hEvent;
	bool m_bComplete;
public:
	CAsyncDelegate(T *pThis, PFunc pFunc)
	{
		m_pThis = pThis;
		m_pFunc = pFunc;

		m_hEvent = CreateEvent(NULL, TRUE, FALSE, _T("Async"));
		m_bComplete = true;
	}

	~CAsyncDelegate()
	{
		if (m_hEvent != NULL)
		{
			CloseHandle(m_hEvent);
			m_hEvent = NULL;
		}
	}

	void BeginInvoke(WPARAM wParam, LPARAM lParam)
	{
		CAsyncTask<T> *task = new CAsyncTask<T>();
		task->pThis = m_pThis;
		task->pFunc = m_pFunc;
		task->wParam = wParam;
		task->lParam = lParam;
		task->hEvent = m_hEvent;

		System::Concurrency::ThreadPool.SubmitTask(task);
		m_bComplete = false;
	}

	void EndInvoke(DWORD dwTimetout = INFINITE)
	{
		if (!m_bComplete)
		{
			DWORD dw = WaitForSingleObject(m_hEvent, dwTimetout);
			if (dw == WAIT_OBJECT_0)
			{
				ResetEvent(m_hEvent);
				m_bComplete = true;
			}
		}
	}

	bool IsCompleted() const
	{
		return m_bComplete;
	}
};


3. 實現工作者線程,工作者線程採用windows 線程池技術。曾經我想過實現自己的線程池,不過後來想想,windows提供了這麼好的功能,我爲什麼不用呢。至於windows線程池的一些方法和函數,我覺得我說的沒有MSDN好,所以我就不說了。

ThreadPool.h
// Thin wrapper around the Windows (Vista+) thread pool API.
// NOTE(review): as the article's footer acknowledges, m_nTasks and
// m_isThreadPoolWorking are accessed without synchronization -- confirm
// before using this in production code.
class CThreadPool
{
public:
	CThreadPool();
	~CThreadPool();

	/* Create thread pool and specify the minimum number and max number of threads that
	 * thread pool could create and use. */
	bool Create(const int nMinThreads, const int nMaxThreads);

	/* Close thread pool, cleanup the resources. */
	void Close();

	/* Submit a task to thread pool. On success the pool takes ownership of
	 * pTask and deletes it after execution (see _WorkCallBack). */
	bool SubmitTask(ITask *pTask);

	/* Number of submitted tasks that have not yet finished. */
	int GetTasks() const;

protected:

	/* Per-work-item context handed to the static callback. */
	struct tWorkContext
	{
		CThreadPool *pThis;
		ITask *pTask;
	};

	/* Callback executed by a pool worker thread for each submitted work item. */
	static VOID CALLBACK _WorkCallBack(PTP_CALLBACK_INSTANCE, PVOID, PTP_WORK);

	void incTasks();
	void decTasks();

private:
	PTP_POOL  m_threadPool;
	PTP_CLEANUP_GROUP m_cleanupGroup;
	TP_CALLBACK_ENVIRON m_callbackEnv;
	bool m_isThreadPoolWorking;
	int m_nTasks;  // Count the number of tasks, including executing tasks and outstanding tasks.
};

// Process-wide thread pool instance (defined in ThreadPool.cpp).
extern CThreadPool ThreadPool;

ThreadPool.cpp
// Process-wide thread pool used by CAsyncDelegate::BeginInvoke.
CThreadPool ThreadPool;

// Start in the "not created" state; all pool resources are acquired lazily
// in Create(). (m_callbackEnv is initialized there as well.)
CThreadPool::CThreadPool()
	: m_threadPool(NULL)
	, m_cleanupGroup(NULL)
	, m_isThreadPoolWorking(false)
	, m_nTasks(0)
{
}

// BUG FIX: the original destructor was empty, leaking the pool, cleanup
// group and callback environment if the owner forgot to call Close().
// Only call Close() when Create() actually acquired resources, since Close()
// destroys the callback environment that Create() initialized.
CThreadPool::~CThreadPool()
{
	if (m_threadPool != NULL || m_cleanupGroup != NULL)
	{
		Close();
	}
}

bool CThreadPool::Create(const int nMinThreads, const int nMaxThreads)
{
	ASSERT(nMinThreads > 0);
	ASSERT(nMaxThreads > 1);
	ASSERT(nMaxThreads > nMinThreads);

	#ifndef FAILED
	#define FAILED(step) TRACE(_T("%s failed error=%d"), step, GetLastError());
	#endif

	BOOL bSucceed = TRUE;

	// Initializes a callback environment. 
	// The thread pool must associate itself with this callback environment.
	InitializeThreadpoolEnvironment(&m_callbackEnv);

	m_threadPool = CreateThreadpool(NULL);
	if (m_threadPool == NULL)
	{
		FAILED(_T("create thread pool"));
		bSucceed = FALSE;
	}

	if (bSucceed)
	{
		bSucceed = SetThreadpoolThreadMinimum(m_threadPool, (DWORD)nMinThreads);
		SetThreadpoolThreadMaximum(m_threadPool, (DWORD)nMaxThreads);
		if (!bSucceed) 
			FAILED(_T("SetThreadpoolThreadMinimum"));
	}

	// Associate the callback environment with thread pool.
	if (bSucceed)
	{
		SetThreadpoolCallbackPool(&m_callbackEnv, m_threadPool);
	}

	// Create new cleaup up group and associate it with thread pool (callback environment)
	m_cleanupGroup = CreateThreadpoolCleanupGroup();
	if (m_cleanupGroup != NULL)
	{
		SetThreadpoolCallbackCleanupGroup(&m_callbackEnv, m_cleanupGroup, NULL);
	}
	else
	{
		bSucceed = FALSE;
		FAILED(_T("CreateThreadpoolCleanupGroup"));
	}

	// If initialize fails, cleanup the resouces.
	if (!bSucceed)
	{
		if (m_cleanupGroup != NULL)
		{
			CloseThreadpoolCleanupGroup(m_cleanupGroup);
			m_cleanupGroup = NULL;
		}

		if (m_threadPool != NULL)
		{
			CloseThreadpool(m_threadPool);
			m_threadPool = NULL;
		}
	}

	#undef  FAILED

	m_isThreadPoolWorking = (bSucceed == TRUE);
	
	return m_isThreadPoolWorking;
}

// Shut the pool down: drain outstanding callbacks, then release the cleanup
// group, the pool itself, and the callback environment (reverse order of
// acquisition in Create()).
void CThreadPool::Close()
{
	// Stop accepting new tasks before tearing anything down.
	m_isThreadPoolWorking = false;

	if (m_cleanupGroup != NULL)
	{
		// Release the members of the cleanup group, then close the group.
		// @ fCancelPendingCallbacks:
		//   -FALSE: Waits for outstanding callback functions to complete.
		//   -TRUE : Cancels outstanding callbacks that have not yet started.
		//           In other words, it blocks until all currently executing
		//           functions finish, but cancels callbacks that are still
		//           waiting to execute.
		//
		// After CloseThreadpoolCleanupGroupMembers returns, the member
		// objects must not be released individually -- it has already
		// released all of them.
		//
		CloseThreadpoolCleanupGroupMembers(m_cleanupGroup, FALSE/*fCancelPendingCallbacks */, NULL);
		CloseThreadpoolCleanupGroup(m_cleanupGroup);
		m_cleanupGroup = NULL;
	}

	if (m_threadPool != NULL)
	{
		CloseThreadpool(m_threadPool);
		m_threadPool = NULL;
	}

	DestroyThreadpoolEnvironment(&m_callbackEnv);
}

// Submit a task for asynchronous execution. On success (returns true) the
// pool takes ownership of pTask and the worker callback deletes it after
// running; on failure ownership of pTask stays with the caller.
bool CThreadPool::SubmitTask(ITask *pTask)
{
	ASSERT(pTask != NULL);

	if (!m_isThreadPoolWorking)
	{
		return false;
	}

	// Package the pool pointer and the task for the static callback.
	tWorkContext *pWorkContext = new tWorkContext();
	pWorkContext->pThis = this;
	pWorkContext->pTask = pTask;

	PTP_WORK pWork = CreateThreadpoolWork(_WorkCallBack, (PVOID)pWorkContext, &m_callbackEnv);
	if (pWork != NULL)
	{
		SubmitThreadpoolWork(pWork);
		incTasks();
	}
	else
	{
		TRACE(_T("CreateThreadpoolWork failed. error=%d"), GetLastError());
		// BUG FIX: on failure the callback never runs, so the original leaked
		// the context. (pTask is not deleted: returning false hands ownership
		// back to the caller.)
		delete pWorkContext;
	}

	return (pWork != NULL);
}

/*static*/ 
// Runs on a pool worker thread for each submitted work item. `context` is
// the tWorkContext allocated in SubmitTask; both the context and the task
// are released here.
VOID CALLBACK CThreadPool::_WorkCallBack(PTP_CALLBACK_INSTANCE Instance, PVOID context, PTP_WORK work)
{
	tWorkContext *pWorkContext = static_cast<tWorkContext *>(context);
	if (pWorkContext != NULL)
	{
		if (pWorkContext->pTask != NULL)
		{
			// Execute the task, give it a completion hook, then signal the
			// submitter's event (if one was attached).
			pWorkContext->pTask->Execute();
			pWorkContext->pTask->Complete();
			pWorkContext->pTask->Signal();

			// Release the task object here, but do NOT close the event
			// handle: the event belongs to the caller, who is responsible
			// for releasing it.
			// NOTE(review): deleting through ITask* is well-defined only if
			// ITask has a virtual destructor -- confirm.
			delete pWorkContext->pTask; 
		}

		pWorkContext->pThis->decTasks();

		delete pWorkContext;
	}
}

// Current count of submitted-but-unfinished tasks.
// NOTE(review): m_nTasks is read without synchronization while worker
// threads modify it -- the article's footer acknowledges this; confirm
// before production use.
int CThreadPool::GetTasks() const
{
	return m_nTasks;
}

// Bump the outstanding-task counter (called from SubmitTask on the
// submitting thread).
// NOTE(review): not atomic; racy against decTasks() on worker threads.
void CThreadPool::incTasks()
{
	++m_nTasks;
}

// Drop the outstanding-task counter (called from _WorkCallBack on a worker
// thread).
// NOTE(review): not atomic; racy against incTasks() on the submitting thread.
void CThreadPool::decTasks()
{
	--m_nTasks;
}

測試代碼:
	//
	//  Asynchronize delegate...
	//
	System::Concurrency::ThreadPool.Create(2, 20);

	std::vector<System::Concurrency::CAsyncDelegate<CA> *> waitList;
	for (int i = 0; i < 500; ++i)
	{
		CA ca1;
		System::Concurrency::CAsyncDelegate<CA> *asyncDelegate = new System::Concurrency::CAsyncDelegate<CA>(&ca1, &CA::Calc);
		asyncDelegate->BeginInvoke(i, i);
		waitList.push_back(asyncDelegate);
	}

	TRACE(_T("pushed all list, now, waiting..."));

	for (auto ptr = waitList.begin(); ptr != waitList.end(); ptr++)
	{
		if (!(*ptr)->IsCompleted())
		{
			(*ptr)->EndInvoke();
		//	TRACE(_T("one more done."));
		}
	}

	for (auto ptr = waitList.begin(); ptr != waitList.end(); ptr++)
	{
		delete (*ptr);
	}
	waitList.clear();

	System::Concurrency::ThreadPool.Close();



OK,以上就是委託的大概內容了,不過,這段代碼只是測試代碼,沒有考慮很多問題,比如說多線程同步,如線程池裏面的m_nTasks等變量都沒有進行同步處理,所以說如果需要在真實項目中使用,還需要再完善!


發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章