Python and C++ programming interview questions

Thread pools:

A thread pool is a form of multithreaded processing: tasks are added to a queue, and once the worker threads have been created they pick those tasks up and run them automatically. Thread pool threads are background threads. Each thread runs with the default stack size and the default priority, and lives in the multithreaded apartment. If a pool thread goes idle in managed code (for example, while waiting on an event), the pool injects another worker thread to keep all processors busy. If all pool threads stay busy while the queue still holds pending work, the pool creates another worker thread after a while, but the number of threads never exceeds the maximum. Work beyond the maximum is queued and does not start until other threads finish.

Choosing the pool size: on a server with N cores, profile the task on a single thread to find the local compute time x and the wait time y; setting the number of worker threads (the pool size) to N*(x+y)/x maximizes CPU utilization.
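
A quick worked example of this formula (the core count and timings below are made-up illustration values):

def pool_size(cores, compute_ms, wait_ms):
    # Worker-thread count that keeps every core busy: N * (x + y) / x
    return int(cores * (compute_ms + wait_ms) / compute_ms)

# e.g. 8 cores, 10 ms of CPU work and 30 ms of waiting per task:
# 8 * (10 + 30) / 10 = 32 worker threads
print(pool_size(8, 10, 30))  # 32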

Python code, using from threading import Thread:


import socket
import threading
from threading import Thread
from queue import Queue

host = ''
port = 8888
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(3)

class ThreadPoolManger():
    """線程池管理器"""
    def __init__(self, thread_num):
        # Initialize parameters
        self.work_queue = Queue()
        self.thread_num = thread_num
        self.__init_threading_pool(self.thread_num)

    def __init_threading_pool(self, thread_num):
        # Initialize the pool: create the specified number of worker threads
        for i in range(thread_num):
            thread = ThreadManger(self.work_queue)
            thread.start()

    def add_job(self, func, *args):
        # Put a task on the queue for a pool thread to pick up; args are the function to run and its arguments
        self.work_queue.put((func, args))

class ThreadManger(Thread):
    """定義線程類,繼承threading.Thread"""
    def __init__(self, work_queue):
        Thread.__init__(self)
        self.work_queue = work_queue
        self.daemon = True

    def run(self):
        # Thread body: keep taking tasks from the queue and running them
        while True:
            target, args = self.work_queue.get()
            target(*args)
            self.work_queue.task_done()

# Create a thread pool with 4 worker threads
thread_pool = ThreadPoolManger(4)

# Handle an HTTP request; here we simply return a 200 with "hello world"
def handle_request(conn_socket):
    recv_data = conn_socket.recv(1024)
    reply = 'HTTP/1.1 200 OK\r\n\r\n'
    reply += 'hello world'
    print('thread %s is running ' % threading.current_thread().name)
    conn_socket.send(reply.encode())
    conn_socket.close()

# Loop, waiting to accept client requests
while True:
    # Block until a request arrives
    conn_socket, addr = s.accept()
    # Hand the socket to handle_request and let the pool assign a thread to it
    thread_pool.add_job(handle_request, conn_socket)

s.close()
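
As a point of comparison, the standard library's concurrent.futures provides a ready-made pool that manages the queue and worker threads internally. A minimal sketch of the same accept loop, reusing handle_request and the listening socket s defined above:

from concurrent.futures import ThreadPoolExecutor

# The executor owns the work queue and the 4 worker threads.
executor = ThreadPoolExecutor(max_workers=4)
while True:
    conn_socket, addr = s.accept()
    # submit() enqueues the call; a free worker thread picks it up.
    executor.submit(handle_request, conn_socket)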

Python code, using import threadpool:

'''
import time
def sayhello(str):
    print ("Hello ",str)
    time.sleep(2)

name_list =['aa','bb','cc']
start_time = time.time()
for i in range(len(name_list)):
    sayhello(name_list[i])
print ('%d second'% (time.time()-start_time))
'''

import time
import threadpool
def sayhello(str):
    print ("Hello ",str)
    time.sleep(2)

name_list =['aa','bb','cc']
start_time = time.time()
pool = threadpool.ThreadPool(10)
requests = threadpool.makeRequests(sayhello, name_list)
[pool.putRequest(req) for req in requests]
pool.wait()
print ('%d second'% (time.time()-start_time))

Differences between TCP and UDP

1. TCP is connection-oriented (like dialing to set up a call before speaking); UDP is connectionless, so no connection is established before sending data (a minimal socket sketch contrasting the two follows this list).
2. TCP provides a reliable service: data sent over a TCP connection arrives error-free, without loss or duplication, and in order; UDP is best-effort delivery and does not guarantee reliable delivery.
3. TCP is byte-stream oriented: it treats the data as an unstructured stream of bytes. UDP is message-oriented and has no congestion control, so network congestion does not slow down the sending host (useful for real-time applications such as IP telephony and live video conferencing).
4. Each TCP connection is strictly point-to-point; UDP supports one-to-one, one-to-many, many-to-one, and many-to-many communication.
5. A TCP header costs 20 bytes; a UDP header is small, only 8 bytes.
6. TCP's logical communication channel is a reliable full-duplex channel; UDP's is an unreliable channel.
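
A minimal client-side sketch of the difference using Python's socket module (the addresses and ports are placeholders, and both calls assume something is listening on the other end):

import socket

# TCP: establish a connection first, then write into the byte stream.
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.connect(('127.0.0.1', 8888))   # three-way handshake happens here
tcp.sendall(b'hello over tcp')
tcp.close()

# UDP: no connection; each sendto() is an independent datagram.
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.sendto(b'hello over udp', ('127.0.0.1', 9999))
udp.close()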

CUDA programming

Introduction

#include <stdio.h>

__global__ void add(int a, int b, int *c) {
	*c = a + b;
}

int main() {
	int c;
	int *dev_c;
	cudaMalloc((void**)&dev_c, sizeof(int));    // allocate one int on the GPU
	add<<<1, 1>>>(2, 7, dev_c);                 // launch the kernel: 1 block, 1 thread
	cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);  // copy the result back
	printf("2 + 7 = %d\n", c);
	cudaFree(dev_c);
	return 0;
}

The function definition carries the __global__ qualifier, which marks it as a function that runs on the GPU. This also involves moving memory between the GPU and the host: cudaMalloc allocates a region in GPU memory; after the kernel has written its result there, cudaMemcpy copies the contents back from the GPU to the host. It's that simple.
Parallel programming: kernel.cu

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>

cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    
    return cudaStatus;
}
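
A build note: this kernel.cu can be compiled directly with NVIDIA's nvcc compiler, for example nvcc kernel.cu -o add_vectors (the output name is arbitrary), and run from the command line; on Windows it is typically built as a Visual Studio CUDA project.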

