Thread pool implementation on Linux

Below is a thread pool example of my own design, modeled on the thread pool implementation in the libwebsockets source code.

/*
 * File name	: cetthreadpool.c
 *
 * Created on	: 2020年5月13日 17:02:20
 * Author		: Firmware of xiyuan255
 * Version		: 2.0
 * Language		: C
 * Copyright	: Copyright (C) 2019, xiyuan255 Inc.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#include <pthread.h>

enum cet_threadpool_task_status {
	CET_TP_STATUS_QUEUED = 1,	/**< task has been enqueued */
	CET_TP_STATUS_RUNNING,		/**< task is currently running */
	CET_TP_STATUS_SYNCING,		/**< task is waiting in a sync */
	CET_TP_STATUS_STOPPING,		/**< task has been asked to stop */
	CET_TP_STATUS_STOPPED, 		/**< task has stopped */
};

typedef unsigned long cet_usec_t;

/**
 * Per-task data structure
 */
typedef struct cet_handler_task
{
	struct cet_threadpool 	*tpool;
	struct cet_handler_task *task_queue_next;		
	
	char name[32];				/**< task name */
	void *args;					/**< argument passed to the task callback */
	int  ( *routine )(void *);	/**< task callback */

	cet_usec_t created;			/**< time the task was created */
	cet_usec_t activated;		/**< time the task was picked up by a worker */
	cet_usec_t entered_state;	/**< time of the last status change */

	cet_usec_t acc_running;		/**< accumulated running time (ms) */
	cet_usec_t acc_syncing;		/**< accumulated syncing time (ms) */

	pthread_cond_t wake_idle;	/**< condition variable used to wake a syncing task */
	enum cet_threadpool_task_status status;
	
} cet_handler_task_t;

/**
 * Per-worker data structure (one slot per pool thread)
 */
typedef struct cet_taskpool
{
	struct cet_threadpool 	*tpool;
	pthread_t				thread_id;		/**< worker thread ID */
	pthread_mutex_t 		lock;			/**< per-worker mutex */
	cet_handler_task_t 		*task; 			/**< task currently owned by this worker */

	cet_usec_t 				activated;		/**< time the current task was picked up */
	int 					worker_id;		/**< index of this worker in the worker table */
	
} cet_taskpool_t;

/**
 * Thread pool data structure
 */
typedef struct cet_threadpool
{
	pthread_mutex_t 	lock; 				/**< pool-wide mutex */
	pthread_cond_t 		wake_idle;			/**< pool-wide condition variable for idle workers */
	cet_taskpool_t		*pool_list;			/**< worker table */
	cet_handler_task_t	*task_queue_head;   /**< queue of tasks waiting to run */

	char 				name[32];			/**< pool name */
	int 				threads_in_pool;	/**< number of worker threads in the pool */
	int 				queue_depth;		/**< current queue depth */
	int 				max_queue_depth;	/**< maximum queue depth */
	int 				done_task_count;	/**< number of completed tasks */
	int 				running_tasks;		/**< number of tasks currently running */

	unsigned int		destroying : 1;		/**< 1: pool destruction has started */
	
} cet_threadpool_t;

#define DEBUG

#if defined(DEBUG)
#define cet_prefix(stdx, sign) \
	fprintf(stdx, "[ThreadId(%p)][%s,%s,%d %s]: ", (void *)pthread_self(), __FILE__, __FUNCTION__, __LINE__, sign)
#define cet_info(...) do { cet_prefix(stdout, "info"); printf(__VA_ARGS__); putchar('\n'); } while (0)
#define cet_debug(...) do { cet_prefix(stdout, "debug"); printf(__VA_ARGS__); putchar('\n'); } while (0)
#define cet_error(...) do { cet_prefix(stderr, "error"); fprintf(stderr, __VA_ARGS__); fputc('\n', stderr); } while (0)
#else
#define cet_info(...) do { } while (0)
#define cet_debug(...) do { } while (0)
#define cet_error(...) do { } while (0)
#endif

cet_usec_t
cet_now_usecs(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return ((unsigned  long)tv.tv_sec * 1000000L) + tv.tv_usec;
}

static int
ms_delta(cet_usec_t now, cet_usec_t then)
{
	return (int)((now - then) / 1000);
}

static void
set_threadpool_task_state(cet_handler_task_t *task, enum cet_threadpool_task_status status)
{
	task->status = status;
	task->entered_state = cet_now_usecs();
}

static void
cet_threadpool_task_dump(cet_handler_task_t *task, char *buf, int len)
{
	snprintf(buf, len, "task: %s, state %d, (on thread: %d ms, ran: %lu ms, synced:%lu ms)", \
		task->name, task->status, ms_delta(task->activated, task->created),
		task->acc_running, task->acc_syncing);
}

static void
cet_threadpool_dump(cet_threadpool_t *tpool)
{
	cet_handler_task_t **t1;
	char buf[256];
	int n, count;

	pthread_mutex_lock(&tpool->lock); /* ======================== tpool lock */

	cet_debug("tp: %s, Queued: %d, Run: %d, Done: %d\n", tpool->name, \
			tpool->queue_depth, tpool->running_tasks, tpool->done_task_count);

	count = 0;
	t1 = &tpool->task_queue_head;
	while (*t1) {
		cet_handler_task_t *task = *t1;
		cet_threadpool_task_dump(task, buf, sizeof(buf));
		cet_debug("  - %s\n", buf);
		count++;

		t1 = &(*t1)->task_queue_next;
	}

	count = 0;
	for (n = 0; n < tpool->threads_in_pool; n++) {
		cet_taskpool_t *pool = &tpool->pool_list[n];
		cet_handler_task_t *task = pool->task;

		if (task) {
			cet_threadpool_task_dump(task, buf, sizeof(buf));
			cet_debug("  - worker %d: %s\n", n, buf);
			count++;
		}
	}

	pthread_mutex_unlock(&tpool->lock); /* --------------- tpool unlock */
}

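/*
 * Remove a task from the pool: if the task is still queued it is unlinked and
 * freed immediately; if it is already running, the owning worker is only asked
 * to stop and releases the task itself once it finishes.
 */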
int
cet_threadpool_dequeue(cet_threadpool_t *tpool, cet_handler_task_t *task)
{	
	int n;
	cet_handler_task_t **t1;
	
	assert((NULL != task) && (NULL != tpool));

	pthread_mutex_lock(&tpool->lock); /* ======================== tpool lock */

	/* if the task is still waiting in the queue, unlink and free it here */
	t1 = &tpool->task_queue_head;
	while (*t1) {
		if ((*t1) == task) {
			*t1 = task->task_queue_next;
			set_threadpool_task_state(task, CET_TP_STATUS_STOPPED);
			tpool->queue_depth--;
			cet_info("tp %p: removed queued task %p\n", tpool, task);
			pthread_cond_destroy(&task->wake_idle);
			free(task);
			pthread_mutex_unlock(&tpool->lock); /* -------- tpool unlock */
			return 0;
		}
		t1 = &(*t1)->task_queue_next;
	}

	/* the task is already owned by a worker: only request a stop here,
	 * the worker releases the task once it actually finishes */
	for (n = 0; n < tpool->threads_in_pool; n++) {
		if (tpool->pool_list[n].task != task)
			continue;

		pthread_mutex_lock(&tpool->pool_list[n].lock);
		set_threadpool_task_state(task, CET_TP_STATUS_STOPPING);
		pthread_mutex_unlock(&tpool->pool_list[n].lock);

		cet_info("tp %p: requested stop of running task (args %p)\n", tpool, task->args);
		break;
	}

	pthread_mutex_unlock(&tpool->lock); /* -------------------- tpool unlock */

	return 0;
}

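/*
 * Queue a new task; the printf-style arguments build the task name.  Returns
 * the task handle, or NULL if the pool is being destroyed, the queue is full,
 * or allocation fails.
 */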
cet_handler_task_t *
cet_threadpool_enqueue(cet_threadpool_t *tpool, 
				int ( *routine )(void *), void *args, 
				const char *format, ...)
{	
	va_list ap;
	cet_handler_task_t *task = NULL;
	
	assert((NULL != routine) && (NULL != tpool));
	
	if (tpool->destroying)
		return NULL;

	pthread_mutex_lock(&tpool->lock);

	if (tpool->queue_depth == tpool->max_queue_depth) {
		cet_error("%d queue length limitation %d", tpool->queue_depth, tpool->max_queue_depth);
		goto CET_HANDLER_EXIT;
	}

	task = (cet_handler_task_t *)calloc(1, sizeof(cet_handler_task_t));
	if (NULL == task) {
		cet_error("task calloc %m");
		goto CET_HANDLER_EXIT;
	}

	task->args 		= args;
	task->tpool 	= tpool;
	task->routine 	= routine;
	task->created	= cet_now_usecs();
	pthread_cond_init(&task->wake_idle, NULL);

	va_start(ap, format);
	vsnprintf(task->name, sizeof(task->name) - 1, format, ap);
	va_end(ap);

	// insert the new task at the head of the queue
	task->task_queue_next = tpool->task_queue_head;
	set_threadpool_task_state(task, CET_TP_STATUS_QUEUED);
	tpool->task_queue_head = task;	
	tpool->queue_depth++;

	cet_info("tp %s: enqueued task %p (%s) depth %d\n", tpool->name, task, task->name,
		    tpool->queue_depth);
	
	pthread_cond_signal(&tpool->wake_idle);

CET_HANDLER_EXIT:
	
	pthread_mutex_unlock(&tpool->lock);

	return task;
}
				
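/*
 * Wake a task that is parked in cet_threadpool_worker_sync(); if @stop is
 * non-zero the task is asked to stop instead of resuming.
 */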
void
cet_threadpool_task_sync(cet_handler_task_t *task, int stop)
{
	cet_debug("sync");

	if (stop)
		set_threadpool_task_state(task, CET_TP_STATUS_STOPPING);

	pthread_cond_signal(&task->wake_idle);
}

/**
 * If the task has still not been woken after 15 * 2 = 30 s of syncing, stop it.
 */
static int
cet_threadpool_worker_sync(cet_taskpool_t *pool, cet_handler_task_t *task)
{
	enum cet_threadpool_task_status pre_status;
	struct timespec abstime;
	int tries = 15;

	cet_info("%p: LWS_TP_RETURN_SYNC in\n", task);
	pthread_mutex_lock(&pool->lock); /* ======================= pool lock */

	cet_info("%s: task %p (%s): syncing", pool->tpool->name, task, task->name);

	pre_status = task->status;
	set_threadpool_task_state(task, CET_TP_STATUS_SYNCING);
	while (tries--) {
		abstime.tv_sec = time(NULL) + 2;
		abstime.tv_nsec = 0;

		/* block the worker on this wait for up to 2 seconds, until timeout or wakeup */
		if (pthread_cond_timedwait(&task->wake_idle, &pool->lock, &abstime) == ETIMEDOUT) {
			if (!tries) {
				cet_error("%s: task %p (%s): SYNC timed out", pool->tpool->name, task, task->name);
				set_threadpool_task_state(task, CET_TP_STATUS_STOPPING);
				goto CET_HANDLER_EXIT;
			}

			continue;
		} else
			break;
	}

	/* if a stop was requested while syncing, leave STOPPING in place so the
	 * worker loop exits; otherwise return the task to its previous state */
	if (task->status == CET_TP_STATUS_SYNCING)
		set_threadpool_task_state(task, pre_status);

	cet_debug("%p: LWS_TP_RETURN_SYNC out\n", task);

CET_HANDLER_EXIT:
	pthread_mutex_unlock(&pool->lock); /* ----------------- - pool unlock */

	return 0;
}

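/*
 * Worker thread body: each worker sleeps on tpool->wake_idle until a task is
 * queued, pulls the oldest task from the tail of the queue (enqueue inserts at
 * the head), and keeps calling task->routine() until the routine returns 0
 * (done), a stop is requested, or the pool is being destroyed.  A return value
 * of 1 parks the worker in cet_threadpool_worker_sync() until the task is
 * woken again.
 */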
void *
cet_threadpool_worker(void *arg)
{
	cet_handler_task_t 	**t1, **t2;
	cet_handler_task_t 	*task = NULL;
	cet_threadpool_t 	*tpool = NULL;
	cet_taskpool_t	 	*pool = NULL;
	char buf[256];
	
	assert(NULL != arg);

	pool	= (cet_taskpool_t *)arg;
	tpool 	= pool->tpool;
	while (!tpool->destroying) {
				
		pthread_mutex_lock(&tpool->lock);

		while (!tpool->task_queue_head && !tpool->destroying)
			pthread_cond_wait(&tpool->wake_idle, &tpool->lock);

		if (tpool->destroying) {
			pthread_mutex_unlock(&tpool->lock);  /* ------ tpool unlock */
			continue;
		}		

		t2 = NULL;
		pool->task = NULL;
		t1 = &tpool->task_queue_head;
		/* walk to the tail of the queue (the oldest task) */
		while (*t1) {
			t2 = t1;
			t1 = &(*t1)->task_queue_next;
		}

		/* take the tail task off the queue and run it */
		if (t2 && *t2) {
			pool->task = task = *t2;
			task->activated = pool->activated = cet_now_usecs();
			*t2 = task->task_queue_next;
			task->task_queue_next = NULL;
			tpool->queue_depth--;
			set_threadpool_task_state(task, CET_TP_STATUS_RUNNING);
		}

		if (!task) {
			pthread_mutex_unlock(&tpool->lock);  /* ------ tpool unlock */
			continue;
		}

		cet_threadpool_task_dump(task, buf, sizeof(buf));

		cet_info("%s: worker %d ACQUIRING: %s\n", tpool->name, pool->worker_id, buf);
		tpool->running_tasks++;

		pthread_mutex_unlock(&tpool->lock); /* --------------- tpool unlock */

		do {
			int ret;
			cet_usec_t thentime = 0;
			
			if (tpool->destroying) {
				set_threadpool_task_state(task, CET_TP_STATUS_STOPPING);
			}
			
			if (!task->routine) {
				cet_error("task %p task->routine is null", task);
				set_threadpool_task_state(task, CET_TP_STATUS_STOPPED);
				break;
			}
			thentime = cet_now_usecs();
			ret = task->routine( task->args );
			task->acc_running += (cet_now_usecs() - thentime) / 1000;
			switch (ret) {
				case 0: /* routine finished: mark the task stopped */
					set_threadpool_task_state(task, CET_TP_STATUS_STOPPED);
					break;
				case 1: /* routine wants to sync: block until woken or timed out */
					thentime = cet_now_usecs();
					cet_threadpool_worker_sync(pool, task);
					task->acc_syncing += (cet_now_usecs() - thentime) / 1000;
					break;
			}
			cet_info("ret %d, execute time %lu ms\n", ret, task->acc_running);
		} while (CET_TP_STATUS_RUNNING == task->status);

		pthread_mutex_lock(&tpool->lock); /* =================== tpool lock */

		tpool->running_tasks--;

		/* a task that was asked to stop has now actually stopped */
		if (CET_TP_STATUS_STOPPING == task->status)
			set_threadpool_task_state(task, CET_TP_STATUS_STOPPED);

		if (CET_TP_STATUS_STOPPED == task->status) {
			tpool->done_task_count++;
			cet_threadpool_task_dump(task, buf, sizeof(buf));
			cet_info("%s: worker %d DONE: %s\n", tpool->name, pool->worker_id, buf);
		}

		/* the worker owns the finished task and releases it here; the handle
		 * returned by cet_threadpool_enqueue() must not be used after this */
		pthread_cond_destroy(&task->wake_idle);
		free(task);
		task = NULL;
		pool->task = NULL;
		pthread_mutex_unlock(&tpool->lock); /* --------------- tpool unlock */
	}	
	cet_info("thread exit");
	pthread_exit(NULL);
}

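/*
 * Create a pool of @thread_num workers with a pending queue limited to
 * @max_queue_depth tasks; the printf-style arguments build the pool name.
 */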
cet_threadpool_t *
cet_threadpool_create(int thread_num, int max_queue_depth, const char *format, ...)
{
	va_list 			ap;
	int					idx = 0;
	cet_threadpool_t	*tpool = NULL;

	assert(thread_num > 0 && max_queue_depth > 0);
	
	tpool = (cet_threadpool_t *)calloc(1, sizeof(cet_threadpool_t));
	if (NULL == tpool) {		
		cet_error("tpool calloc %m");
		goto CET_ERROR_EXIT;
	}

	tpool->pool_list = (cet_taskpool_t *)calloc(thread_num, sizeof(cet_taskpool_t));
	if (NULL == tpool->pool_list) {
		cet_error("pool_list calloc %m");
		goto CET_ERROR_EXIT;
	}

	tpool->max_queue_depth = max_queue_depth;

	va_start(ap, format);
	vsnprintf(tpool->name, sizeof(tpool->name) - 1, format, ap);
	va_end(ap);

	pthread_mutex_init(&tpool->lock, NULL);
	pthread_cond_init(&tpool->wake_idle, NULL);

	for (idx = 0; idx < thread_num; idx++) {
		tpool->pool_list[idx].tpool = tpool;
		tpool->pool_list[idx].worker_id = idx;
		pthread_mutex_init(&tpool->pool_list[idx].lock, NULL);
		if (pthread_create(&tpool->pool_list[idx].thread_id, NULL, \
			cet_threadpool_worker, &tpool->pool_list[idx])) {			
			cet_error("pthread_create failed");
		} else {
			tpool->threads_in_pool++;
		}
	}

	return tpool;

CET_ERROR_EXIT:

	if (NULL != tpool) {
		free(tpool->pool_list);
		free(tpool);
	}

	return NULL;
}

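/*
 * Destroy the pool: mark it as destroying, wake every worker (including any
 * task parked in a sync), join all worker threads and release the pool.
 */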
void
cet_threadpool_destroy(cet_threadpool_t *tpool)
{
	cet_handler_task_t *task;
	void *retval;
	int n;

	pthread_mutex_lock(&tpool->lock); /* ======================== tpool lock */

	tpool->destroying = 1;
	pthread_cond_broadcast(&tpool->wake_idle);
	pthread_mutex_unlock(&tpool->lock); /* -------------------- tpool unlock */

	cet_threadpool_dump(tpool);

	for (n = 0; n < tpool->threads_in_pool; n++) {
		task = tpool->pool_list[n].task;

		if (task != NULL)
			pthread_cond_broadcast(&task->wake_idle);

		pthread_join(tpool->pool_list[n].thread_id, &retval);
		pthread_mutex_destroy(&tpool->pool_list[n].lock);
	}
	cet_info("all threadpools exited\n");

	pthread_mutex_destroy(&tpool->lock);

	free(tpool);
}

static char *test_string[] = { 
		"test string 0", "test string 1", "test string 2", "test string 3", "test string 4"
	};

int
routine_task(void *arg)
{
	int value = 0;
	int idx = 0;

	/* burn a little CPU time to simulate work */
	for (idx = 0; idx < 1000000; idx++) {
		value += idx;
	}
	(void)value;

	cet_debug("show: %s", (char *)arg);	

	return 0;
}
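
/*
 * A hypothetical extra routine (not part of the original sample) sketching how
 * the sync path could be exercised: returning 1 parks the worker in
 * cet_threadpool_worker_sync() until another thread calls
 * cet_threadpool_task_sync() on the handle returned by
 * cet_threadpool_enqueue(), e.g.:
 *
 *   cet_handler_task_t *t = cet_threadpool_enqueue(tpool, routine_sync_demo,
 *                                                  "sync demo", "sync-task");
 *   sleep(1);
 *   cet_threadpool_task_sync(t, 0);   // resume the parked task
 *
 * Note the handle must not be used once the task has completed.
 */
int
routine_sync_demo(void *arg)
{
	static int resumed = 0;		/* good enough for a single demo task */

	if (!resumed) {
		resumed = 1;
		cet_debug("sync demo: first pass, parking (%s)", (char *)arg);
		return 1;	/* ask the worker to wait in the sync path */
	}

	cet_debug("sync demo: resumed (%s)", (char *)arg);
	return 0;		/* done */
}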

int main(int argc, char * const argv[])
{
	int	idx = 0;
	int thread_num = 8;
	int queue_max_size = 16;
	cet_threadpool_t *tpool = NULL;
	
	tpool = cet_threadpool_create(thread_num, queue_max_size, "threadpool-%d", 0);
	if (NULL == tpool) {
		cet_error("cet_threadpool_create failed %m");	
		return -1;
	}

	sleep(1);
	
	cet_debug("create thread num %d, queue size %d done.", thread_num, queue_max_size);	

	for (idx = 0; idx < 5; idx++) {
		cet_threadpool_enqueue(tpool, routine_task, test_string[idx], "routine_task-%d", idx);
	}

	//while (1) {
		sleep(1);
	//}

	//cet_threadpool_dump(tpool);

	cet_threadpool_destroy(tpool);

	return 0;
}
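
On a typical Linux system the example can be built with something like `gcc -o cetthreadpool cetthreadpool.c -lpthread` (or `gcc -pthread ...`). Running it should print the enqueue, ACQUIRING and DONE traces for the five routine_task jobs, followed by the thread-exit messages as the pool is destroyed.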

 
