信號量的結構體定義如下:linux+v2.6.28/include/linux/semaphore.h:
struct semaphore { spinlock_t lock; //自旋鎖 unsigned int count; struct list_head wait_list; //內核的雙向鏈表 };
|
初始化信號量:
#define init_MUTEX(sem) sema_init(sem, 1) #define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
|
獲取信號量:
extern void down(struct semaphore *sem); extern int __must_check down_interruptible(struct semaphore *sem);
|
以上兩個函數都是用來獲取信號量的。不同之處在於down會導致睡眠,而且不能被信號打斷。而down_interruptible也會導致睡眠,但是它可以被信號打斷。
釋放信號量:
extern void up(struct semaphore *sem);
|
/***********************************************************************************************/
爲了更好的理解阻塞和非阻塞,先來看兩段代碼:
/******************************代碼1***************************************/
/* 阻塞地從串口讀取一個字符*/
fd = open("/dev/ttyS1", O_RDWR); ...
ret = read(fd, buf, 1); //當串口上有數據時才返回 if(ret > 0)
printf("%c\n", buf);
/******************************代碼2***************************************/ /* 非阻塞地從串口讀取一個字符 */
fd = open("/dev/ttyS2", O_RDWR|O_NONBLOCK); ... while(read(fd, buf, 1) != 1); //串口上沒有數據也返回,所以要循環讀取串口 printf("%c", buf);
|
阻塞是指在執行設備操作時如果不能獲得資源則掛起進程,直到資源可以被獲取後再進行操作。
阻塞從字面上聽起來意味着低效率,實則不然,如果設備不阻塞,則用戶想要獲取設備資源只能不停的查詢,這反而會無謂地消耗CPU資源。而阻塞訪問時,不能獲取資源的進程將進入休眠,它將CPU資源讓給其他進程,直到資源可以被獲取。
在Linux驅動程序中,可以使用等待隊列來實現阻塞進程的喚醒。
等待隊列結構體如下:
linux+v2.6.28/include/linux/wait.h:
typedef struct __wait_queue wait_queue_t; struct __wait_queue {
unsigned int flags; #define WQ_FLAG_EXCLUSIVE 0x01
void *private;
wait_queue_func_t func;
struct list_head task_list; };
|
等待隊列頭定義如下:linux+v2.6.28/include/linux/wait.h:
struct __wait_queue_head { spinlock_t lock; struct list_head task_list; }; typedef struct __wait_queue_head wait_queue_head_t;
|
初始化等待隊列頭和等待隊列:
初始化等待隊列頭有兩種方式:
void init_waitqueue_head(wait_queue_head_t *q);
DECLARE_WAIT_QUEUE_HEAD(name);
linux+v2.6.28/include/linux/wait.h:
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .task_list = { &(name).task_list, &(name).task_list } } #define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
|
linux+v2.6.28/kernel/wait.c:
void init_waitqueue_head(wait_queue_head_t *q) { spin_lock_init(&q->lock); INIT_LIST_HEAD(&q->task_list); }
|
對於等待隊列,使用一個宏去定義和初始化:
DECLARE_WAITQUEUE(name, tsk)
#define __WAITQUEUE_INITIALIZER(name, tsk) { \ .private = tsk, \ .func = default_wake_function, \ .task_list = { NULL, NULL } } #define DECLARE_WAITQUEUE(name, tsk) \ wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
|
在使用等待隊列前,需要定義和初始化等待隊列頭和等待隊列。
定義和初始化完成後,需要從等待隊列頭添加或者移出等待隊列:
linux+v2.6.28/kernel/wait.c:
/* 添加等待隊列*/
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) { unsigned long flags; wait->flags &= ~WQ_FLAG_EXCLUSIVE; spin_lock_irqsave(&q->lock, flags); __add_wait_queue(q, wait); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(add_wait_queue);
|
/* 移出等待隊列*/
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) { unsigned long flags; spin_lock_irqsave(&q->lock, flags); __remove_wait_queue(q, wait); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL(remove_wait_queue);
|
等待事件:
wait_event(queue, condition);
wait_event_interruptible(queue, condition);
wait_event_timeout(queue, condition, timeout);
wait_event_interruptible_timeout(queue, condition, timeout);
wait_event_xxx函數第一個參數是等待隊列頭,第二個參數condition必須滿足,否則阻塞。wait_event()和wait_event_interruptible()區別在於後者可以被信號打斷。timeout是阻塞等待的超時時間,以jiffy爲單位,在第三個參數的timeout到達時,不論condition是否滿足,均返回。
舉例來看 wait_event_interruptible()
linux+v2.6.28/include/linux/wait.h:
#define __wait_event_interruptible(wq, condition, ret) \ do { \ DEFINE_WAIT(__wait); \ \ for (;;) { \ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ if (condition)//條件滿足 返回 \ break; \ if (!signal_pending(current)) { \ schedule(); \ continue; \ } \ ret = -ERESTARTSYS; \ break; \ } \ finish_wait(&wq, &__wait); \ } while (0)
#define wait_event_interruptible(wq, condition) \
277({ \
278 int __ret = 0; \
279 if (!(condition)) \
280 __wait_event_interruptible(wq, condition, __ret); \
281 __ret; \
282})
|
喚醒隊列:
void wake_up(wait_queue_head_t *queue);
void wake_up_interruptible(wait_queue_head_t *queue);
以上兩個函數會喚醒以queue作爲等待隊列頭的所有等待隊列中的進程。
/***********************************************************************/
poll函數是用來支持用戶層的select函數調用的。
select函數原型如下:
int select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout);
其中readfds, writefds, exceptfds分別是被select()監視的讀、寫、異常處理的文件描述符集合。numfds是需要檢查的號碼的最大的文件描述符加一。timeout參數是一個指向struct timeval結構體的指針,它可以使select()等待timeout時間後返回.
struct timeval定義如下:
struct timeval {
int tv_sec; //秒
int tv_usec; //微秒
}
下面的宏用來設置、清除、判斷文件描述符集合:
FD_ZERO(fd_set *set); 清除一個文件描述符集合
FD_SET(int fd, fd_set *set); 將一個文件描述符加入到文件描述符集中
FD_CLR(int fd, fd_set *set); 將一個文件描述符從文件描述符集中清除
FD_ISSET(int fd, fd_set *set); 判斷文件描述符是否置位
設備驅動中的poll原型如下:
unsigned int (*poll)(struct file *filp, struct poll_table *wait);
第一個參數爲file結構體指針,第二個參數爲輪詢表指針。
這個函數應該進行以下兩項工作:
(1)對可能引起設備文件狀態變化的等待隊列調用poll_wait函數,將對應的等待隊列頭添加到poll_table。
(2)返回表示是否能對設備進行無阻塞讀,寫訪問的掩碼。
poll_table和poll_wait定義如下:
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); typedef struct poll_table_struct { poll_queue_proc qproc;
} poll_table; static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) { if (p && wait_address) p->qproc(filp, wait_address, p); //這裏不知道怎麼實現的,高手指點 }
|
poll返回設備資源的可獲取狀態:POLLIN, POLLOUT, POLLPRI, POLLERR, POLLNVAL等宏的位和或的結果.每個宏代表設備的一種狀態.
設備驅動的poll()典型模板是:
/* Typical template of a driver poll() method; xxx_dev and if(...) are placeholders. */
static unsigned int lan_poll(struct file *filp, poll_table *wait) {
unsigned int mask = 0;
struct xxx_dev_t *xxx_dev = filp->private_data;
down(&xxx_dev->sem);
poll_wait(filp, &xxx_dev->r_wait, wait);/* add the read wait-queue head so select() can poll it */
poll_wait(filp, &xxx_dev->w_wait, wait);/* add the write wait-queue head so select() can poll it */
if(...)
mask |= POLLIN | POLLRDNORM; /* data can be read without blocking */
if(...)
mask |= POLLOUT | POLLWRNORM; /* data can be written without blocking */
up(&xxx_dev->sem);
return mask; }
|
以上功能將在下面的字符設備中得到實現:
/*
* globalmem設備,沒有對應真實的硬件,主要用來學習Linux設備驅動開發。
* (1) 使用信號量支持併發
* (2) 使用等待隊列支持阻塞
* (3) 加入了poll函數支持用戶下的select查詢
* 建議交流:[email protected]
*/
#include <linux/module.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/cdev.h> #include <asm/io.h> #include <asm/system.h> #include <asm/uaccess.h>
/*
 * BUGFIX: the original paste collapsed several #defines onto one line, so the
 * trailing // comment swallowed MEM_CLEAR/LAN_MAJOR and the struct's closing
 * brace. Restored one directive per line.
 */
#define LAN_SIZE  0x1000  /* size of the global memory: 4KB */
#define MEM_CLEAR 0x1     /* ioctl command: clear the global memory */
#define LAN_MAJOR 244     /* default major device number */

static int lan_major = LAN_MAJOR;

/* Per-device state; a single global instance is used by this driver. */
struct lan_dev_t {
	struct cdev cdev;               /* char device structure */
	unsigned int current_len;       /* number of valid bytes in the fifo */
	unsigned char lan_buf[LAN_SIZE];/* data buffer */
	struct semaphore sem;           /* semaphore serializing concurrent access */
	wait_queue_head_t r_wait;       /* wait-queue head for blocked readers */
	wait_queue_head_t w_wait;       /* wait-queue head for blocked writers */
};

struct lan_dev_t *lan_dev;              /* the single device instance */
/*
 * open(): stash the global device structure in the file's private data so
 * the other file operations can reach it through filp.
 */
static int lan_open(struct inode *inode, struct file *filp)
{
	filp->private_data = lan_dev;
	printk("Open OK!\n");
	return 0;
}
/* release(): nothing to tear down — the device struct lives for the module lifetime. */
static int lan_release(struct inode *inode, struct file *filp)
{
	printk("Close OK!\n");
	return 0;
}

/*
 * read(): blocking path consumes from the front of the fifo, sleeping on
 * r_wait while the fifo is empty; nonblocking (O_NONBLOCK) path reads at
 * *ppos and fails with -EAGAIN when no data is available.
 */
static ssize_t lan_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	unsigned long p = *ppos;
	size_t count = size;
	int ret = 0;
	struct lan_dev_t *lan_dev = filp->private_data;
	DECLARE_WAITQUEUE(wait, current);	/* declared up front (C90 rules) */

	if (filp->f_flags & O_NONBLOCK)
		goto no_block;

	down(&lan_dev->sem);
	add_wait_queue(&lan_dev->r_wait, &wait);	/* join the READ wait queue */
	while (lan_dev->current_len == 0) {
		__set_current_state(TASK_INTERRUPTIBLE);	/* mark sleeping before dropping lock */
		up(&lan_dev->sem);
		schedule();				/* yield the CPU to other processes */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto signal_come;
		}
		down(&lan_dev->sem);
	}
	if (size > lan_dev->current_len)
		size = lan_dev->current_len;
	if (copy_to_user(buf, lan_dev->lan_buf, size)) {
		ret = -EFAULT;
		goto out;
	} else {
		memcpy(lan_dev->lan_buf, lan_dev->lan_buf + size,
		       lan_dev->current_len - size);
		lan_dev->current_len -= size;
		wake_up_interruptible(&lan_dev->w_wait);	/* room freed: wake writers */
		ret = size;
	}
out:
	up(&lan_dev->sem);
signal_come:
	/* BUGFIX: we queued on r_wait above, so remove from r_wait (was w_wait) */
	remove_wait_queue(&lan_dev->r_wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;

no_block:
	if (p >= LAN_SIZE)
		return count ? -ENXIO : 0;
	if (count > LAN_SIZE - p)
		count = LAN_SIZE - p;
	down(&lan_dev->sem);
	/* BUGFIX: clamp against current_len while HOLDING the semaphore (was unlocked) */
	if (count > lan_dev->current_len)
		count = lan_dev->current_len;
	if (count == 0) {
		up(&lan_dev->sem);
		/* BUGFIX: nonblocking read with no data is -EAGAIN, not 0 */
		return size ? -EAGAIN : 0;
	}
	if (copy_to_user(buf, lan_dev->lan_buf + p, count)) {
		ret = -EFAULT;
	} else {
		*ppos += count;
		p = *ppos;
		memcpy(lan_dev->lan_buf, lan_dev->lan_buf + count,
		       lan_dev->current_len - count);
		lan_dev->current_len -= count;
		ret = count;
		/* BUGFIX: %zu for size_t, %lu for unsigned long (was %d/%ld) */
		printk("Read %zu byte from %lu\n", count, p);
	}
	up(&lan_dev->sem);
	return ret;
}

/*
 * write(): blocking path appends to the fifo, sleeping on w_wait while the
 * fifo is full; nonblocking path writes at *ppos.
 */
static ssize_t lan_write(struct file *filp, const char __user *buf, size_t size, loff_t *ppos)
{
	unsigned long p = *ppos;
	unsigned int count = size;
	int ret = 0;
	struct lan_dev_t *lan_dev = filp->private_data;
	DECLARE_WAITQUEUE(wait, current);

	if (filp->f_flags & O_NONBLOCK)
		goto no_block;

	down(&lan_dev->sem);
	add_wait_queue(&lan_dev->w_wait, &wait);	/* join the write wait queue */
	while (lan_dev->current_len == LAN_SIZE) {
		__set_current_state(TASK_INTERRUPTIBLE);
		up(&lan_dev->sem);
		schedule();
		if (signal_pending(current)) {
			/* BUGFIX: signal interruption is -ERESTARTSYS (was -EAGAIN),
			 * matching lan_read() and kernel convention. */
			ret = -ERESTARTSYS;
			goto signal_come;
		}
		down(&lan_dev->sem);
	}
	if (size > LAN_SIZE - lan_dev->current_len)
		size = LAN_SIZE - lan_dev->current_len;
	if (copy_from_user(lan_dev->lan_buf + lan_dev->current_len, buf, size)) {
		ret = -EFAULT;
		goto out;
	} else {
		lan_dev->current_len += size;
		printk("lan_dev->current_len = %u\n", lan_dev->current_len);
		wake_up_interruptible(&lan_dev->r_wait);	/* data arrived: wake readers */
		ret = size;
	}
out:
	up(&lan_dev->sem);
signal_come:
	remove_wait_queue(&lan_dev->w_wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;

no_block:
	if (p >= LAN_SIZE)
		return count ? -ENXIO : 0;
	if (count > LAN_SIZE - p)
		count = LAN_SIZE - p;
	down(&lan_dev->sem);
	if (copy_from_user(lan_dev->lan_buf + p, buf, count))
		ret = -EFAULT;
	else {
		*ppos += count;
		p = *ppos;
		lan_dev->current_len += count;
		/* BUGFIX: keep the fifo length within the buffer bounds */
		if (lan_dev->current_len > LAN_SIZE)
			lan_dev->current_len = LAN_SIZE;
		ret = count;
		printk("Write %u byte from %lu\n", count, p);
	}
	up(&lan_dev->sem);
	return ret;
}
/*
 * ioctl(): the only supported command is MEM_CLEAR, which wipes the buffer
 * and resets the fifo length under the device semaphore.
 */
static int lan_ioctl(struct inode *inodep, struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct lan_dev_t *dev = filp->private_data;

	if (cmd != MEM_CLEAR)
		return -EINVAL;	/* unknown command */

	down(&dev->sem);
	memset(dev->lan_buf, 0, LAN_SIZE);
	dev->current_len = 0;
	up(&dev->sem);
	printk("Memset buf ok!\n");
	return 0;
}
/*
 * poll(): register both wait-queue heads with the poll table, then report
 * the readable/writable state of the fifo as a POLL* mask.
 */
static unsigned int lan_poll(struct file *filp, poll_table *wait)
{
	struct lan_dev_t *dev = filp->private_data;
	unsigned int mask = 0;

	down(&dev->sem);
	poll_wait(filp, &dev->r_wait, wait);	/* readers sleep here */
	poll_wait(filp, &dev->w_wait, wait);	/* writers sleep here */
	if (dev->current_len != 0)
		mask |= POLLIN | POLLRDNORM;	/* data can be read */
	if (dev->current_len != LAN_SIZE)
		mask |= POLLOUT | POLLWRNORM;	/* data can be written */
	up(&dev->sem);

	return mask;
}
/* File operations implemented by this driver. */
static struct file_operations lan_fops = {
.owner = THIS_MODULE,
.open = lan_open,
//.llseek = lan_llseek,  /* llseek not implemented */
.read = lan_read,
.write = lan_write,
.ioctl = lan_ioctl,
.poll = lan_poll,
.release = lan_release, };
static void lan_setup_cdev(struct lan_dev_t *lan_dev, int index) {
int err, devno;
devno = MKDEV(lan_major, index);
cdev_init(&lan_dev->cdev, &lan_fops);
err = cdev_add(&lan_dev->cdev, devno, 1);
if(err)
printk(KERN_NOTICE"Error %d adding cdev!\n", err); }
/*
 * Module init: reserve the device number, allocate and initialize the
 * device state, then make the cdev live.
 */
static int __init lancdev_init(void)
{
	int ret;
	dev_t devno = MKDEV(lan_major, 0);

	ret = register_chrdev_region(devno, 1, "globalmem");
	if (ret < 0)
		return ret;
	lan_dev = kzalloc(sizeof(struct lan_dev_t), GFP_KERNEL);	/* kzalloc = kmalloc + memset */
	if (!lan_dev) {
		printk("kmalloc Error!\n");
		/* BUGFIX: release the reserved device numbers on failure (was leaked) */
		unregister_chrdev_region(devno, 1);
		return -ENOMEM;
	}
	/* BUGFIX: initialize the semaphore and wait queues BEFORE cdev_add()
	 * makes the device live — userspace may open it immediately. */
	init_MUTEX(&lan_dev->sem);
	init_waitqueue_head(&lan_dev->r_wait);
	init_waitqueue_head(&lan_dev->w_wait);
	lan_setup_cdev(lan_dev, MINOR(devno));
	return 0;
}

/* Module exit: unregister the cdev, free the state, release the numbers. */
static void __exit lancdev_exit(void)
{
	cdev_del(&lan_dev->cdev);
	kfree(lan_dev);
	unregister_chrdev_region(MKDEV(lan_major, 0), 1);
}
/* Module metadata and entry/exit registration. */
MODULE_AUTHOR("LanPeng");
MODULE_LICENSE("GPL");	/* GPL so GPL-only kernel symbols may be used */
module_init(lancdev_init);
module_exit(lancdev_exit);
|
用戶測試程序select測試:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/select.h>
/*
 * Userspace test: clear the device via ioctl, then loop on select() and
 * report when the device becomes readable/writable.
 */
int main(int argc, char **argv)
{
	int fd;
	fd_set rfds, wfds;

	fd = open("/dev/globalmem_lan", O_RDWR);
	if (fd < 0) {
		printf("Open Error!\n");
		return -1;
	}
	if (ioctl(fd, 0x01, 0) < 0) {	/* 0x01 == MEM_CLEAR */
		printf("ioctl Error!\n");
		close(fd);	/* BUGFIX: don't leak the descriptor on this path */
		return -1;
	}
	while (1) {
		FD_ZERO(&rfds);
		FD_ZERO(&wfds);
		FD_SET(fd, &rfds);
		FD_SET(fd, &wfds);
		/* BUGFIX: check select()'s result — the fd_sets are undefined on
		 * error and the original would busy-loop forever on failure. */
		if (select(fd + 1, &rfds, &wfds, NULL, NULL) < 0) {
			printf("select Error!\n");
			break;
		}
		if (FD_ISSET(fd, &rfds)) {
			printf("Poll monitor : can be read!\n");
		}
		if (FD_ISSET(fd, &wfds)) {
			printf("Poll monitor: can be write!\n");
		}
	}
	close(fd);
	return 0;
}
|
<script type=text/javascript charset=utf-8 src="http://static.bshare.cn/b/buttonLite.js#style=-1&uuid=&pophcol=3&lang=zh"></script>
<script type=text/javascript charset=utf-8 src="http://static.bshare.cn/b/bshareC0.js"></script>
閱讀(826) | 評論(2) | 轉發(0) |
給主人留下些什麼吧!~~
8353042052013-07-08 23:51:32
p->qproc(filp, wait_address, p); //這裏不知道怎麼實現的,高手指點
原型在static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)中賦值
例如:static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
poll_table *p)
{
struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
struct poll_table_entry *entry = poll_get_entry(pwq);
if (!entry)
chinaunix網友2010-09-29 11:39:05
很好的, 收藏了
推薦一個博客,提供很多免費軟件編程電子書下載:
http://free-ebooks.appspot.com