Linux Block Device Internals (Part 2): How Block Devices Work, Using the ext2 Filesystem as an Example

In the previous article we analyzed how to write an MTD-based flash driver and walked through the registration flow; at that point the block driver is up and running. But how does file I/O relate to the block driver, and what exactly is the relationship between the filesystem and the block driver? These questions are worth looking into. This article mainly traces the data path from the VFS layer down to the flash driver layer; through that analysis, we try to make the working principle of the whole block driver clear.

First, a brief overview of the basics of the ext2 filesystem.

1 The ext2 filesystem

1.1 ext2 filesystem structure diagram

Every file or directory has an inode on disk that holds the file's own metadata, plus data blocks that hold the file's contents. The relationship between the inode and its data blocks is shown in the figure below:

If a file is small and occupies no more than 12 data blocks, the block indices are stored directly in the inode's i_block array; if the file is larger and exceeds 12 data blocks, indirect blocks must be allocated to hold the additional block indices.

The content above is adapted from this post:

 https://blog.csdn.net/chenying126/article/details/77921542
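
To make the direct/indirect indexing concrete, here is a small illustrative sketch (not kernel code; the helper name is made up, and it assumes the classic ext2 layout of 12 direct pointers with 4-byte block pointers):

/* Illustrative only: classify a file's logical block number against ext2's
 * 12 direct pointers plus single/double/triple indirect blocks.
 * ptrs = block_size / 4, since each block pointer is 4 bytes. */
static const char *ext2_block_class(unsigned long blk, unsigned long block_size)
{
	unsigned long ptrs = block_size / 4;

	if (blk < 12)
		return "direct (i_block[0..11])";
	blk -= 12;
	if (blk < ptrs)
		return "single indirect (i_block[12])";
	blk -= ptrs;
	if (blk < ptrs * ptrs)
		return "double indirect (i_block[13])";
	return "triple indirect (i_block[14])";
}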

From the diagram above we can see that an ext2 filesystem starts with a superblock that records information about the whole filesystem; the superblock is created when the filesystem is formatted. Files and directories within the filesystem are maintained by inodes, which are distinct from the in-memory inodes used by the VFS. With this initial picture of ext2 in mind, let us start from the mount of an ext2 filesystem and analyze how data is read from flash to complete the mount.

2 Mounting an ext2 filesystem

A filesystem is usually mounted with a command like the following:

mount -t ext2 /dev/mtdblock0 /mnt

This mounts the mtdblock0 device at the /mnt directory; you have to specify both the mount point and the block device to be mounted. A detailed analysis of filesystem mounting was already given in this article:

https://blog.csdn.net/oqqYuJi12345678/article/details/101689334
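
For reference, the same mount can also be issued programmatically through the mount(2) system call; a minimal hedged sketch (the device and mount-point paths are just the ones from the example above):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* equivalent to: mount -t ext2 /dev/mtdblock0 /mnt */
	if (mount("/dev/mtdblock0", "/mnt", "ext2", 0, NULL) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}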

Here we focus on the parts of the mount that are specific to ext2. The mount involves reading from flash, which gives us a good opportunity to see the whole driver at work. For the ext2 mount, we skip the earlier lookup of the mount point and go straight to the ext2_mount function:

static struct dentry *ext2_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
}

ext2_mount mainly initializes the ext2 superblock. Initializing the superblock requires reading the filesystem information from flash, after which a dentry and an inode are created for the filesystem's root. In the function above, note the ext2_fill_super callback, which will be used later. Now let's look at mount_bdev:

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;
------------------------------------------------(1)
	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
-------------------------------------------------(2)
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
		 bdev);
。。。。。。。。。。。。。。。。。。。。。。。
		char b[BDEVNAME_SIZE];

		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
----------------------------------------------------(3)
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

。。。。。。。。。。。。。。
}

The function above does three main things:

(1) From the given block device node path, find the actual block device, in preparation for the flash reads and writes that follow.

(2) Allocate and initialize a superblock.

(3) Continue initializing the superblock, allocate a dentry and an inode for the root directory, and complete the mount.

2.1 Associating the block device via blkdev_get_by_path

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	int err;
--------------------------------------------------(1)
	bdev = lookup_bdev(path); // find the block_device from the device node path
	if (IS_ERR(bdev))
		return bdev;
---------------------------------------------------------(2)
	err = blkdev_get(bdev, mode, holder); // associate the block_device with the MTD device
	if (err)
		return ERR_PTR(err);

	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}

(1) First, let's see how the block_device is found:

struct block_device *lookup_bdev(const char *pathname)
{
	struct block_device *bdev;
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return ERR_PTR(-EINVAL);
// Path walking: find the dentry and inode of the /dev/mtdblock0 node; that inode holds the block device's device number.
	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return ERR_PTR(error);

	inode = path.dentry->d_inode;
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (path.mnt->mnt_flags & MNT_NODEV)
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode); // find the block_device from the inode
	if (!bdev)
		goto fail;
out:
	path_put(&path);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}

Let's focus on bd_acquire:

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
。。。。。。。。。。。。。。

	bdev = bdget(inode->i_rdev); // inode->i_bdev is clearly not set yet at this point, so the block_device has to be obtained here
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			ihold(bdev->bd_inode);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
			list_add(&inode->i_devices, &bdev->bd_inodes);
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

bd_acquire

     ----------->bdget

struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;
/* First look in the inode hash table for the inode matching device number dev.
	   If it is not found, a bdev_inode (a structure containing both an inode and a
	   block_device) is created through the bdev pseudo filesystem. On a first mount
	   it will not be found, so a new one has to be allocated here. */
	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);
	if (!inode)
		return NULL;
// get the bdev_inode from the inode, then the block_device instance from the bdev_inode
	bdev = &BDEV_I(inode)->bdev;
	if (inode->i_state & I_NEW) {
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL; /* set up the relevant fields of the block_device and the inode */
		bdev->bd_inode = inode;
		bdev->bd_block_size = (1 << inode->i_blkbits);
		bdev->bd_part_count = 0;
		bdev->bd_invalidated = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev; // link the inode with the bdev
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		inode->i_data.backing_dev_info = &default_backing_dev_info;
		spin_lock(&bdev_lock);
		list_add(&bdev->bd_list, &all_bdevs);
		spin_unlock(&bdev_lock);
		unlock_new_inode(inode);
	}
	return bdev;
}

bd_acquire

     ----------->bdget

          ------------->iget5_locked

struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
................................................
	inode = alloc_inode(sb);
............................................
	if (set(inode, data))
。。。。。。。。。。。。。。。。。。。。。。。
}

The most important call here is alloc_inode, which allocates the inode through the function supplied by the blockdev_superblock superblock, i.e. sb->s_op->alloc_inode. The set(inode, data) callback matters just as much: it executes bdev->bd_dev = dev, recording the device number in the block_device for later use.
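
For reference, the test and set callbacks passed to iget5_locked here are small helpers in fs/block_dev.c; roughly (a sketch from memory of this kernel generation, details may differ slightly between versions):

static int bdev_test(struct inode *inode, void *data)
{
	/* an inode matches if the block_device embedded alongside it has this dev_t */
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	/* record the device number in the freshly allocated block_device */
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}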

blockdev_superblock belongs to a pseudo filesystem; let's look at where it is initialized:

static struct file_system_type bd_type = {
	.name		= "bdev",
	.mount		= bd_mount,
	.kill_sb	= kill_anon_super,
};


void __init bdev_cache_init(void)
{
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

As you can see, this is a pseudo filesystem called bdev. Its sb->s_op operation set is:

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.destroy_inode = bdev_destroy_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

bdev_alloc_inode is the function that allocates inodes for this pseudo filesystem:

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

A bdev_inode is allocated here; it contains both a block_device and an inode. With that, we have obtained the block_device structure.
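
The BDEV_I() helper used earlier in bdget() to go from the inode back to the containing bdev_inode (and thus to the block_device) is essentially a container_of; roughly (a sketch matching fs/block_dev.c of this era):

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}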

(2) Next, let's look at blkdev_get and see how the newly allocated block_device is associated with the MTD device:

int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
	struct block_device *whole = NULL;
	int res;
。。。。。。。。。。。。。。。。。。。。。。。。
	res = __blkdev_get(bdev, mode, 0);
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
}

__blkdev_get obtains information from the gendisk and sets up the links between the related data structures.

Note that the last argument passed to __blkdev_get() is 0, i.e. by default the whole (main) device is opened. After the gendisk is obtained there are four cases to handle, depending on whether the device is being opened for the first time and whether the thing being opened is the whole device or a partition; see the code comments for details.

static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk;
	int ret;
	int partno;
	int perm = 0;
 
	。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
 restart:
 
	ret = -ENXIO;
	// get the gendisk instance for this device; if bd_dev refers to a partition, partno will be updated accordingly
-----------------------------------------------------------------------(2.1)
	disk = get_gendisk(bdev->bd_dev, &partno);
	if (!disk)
		goto out_unlock_kernel;
 
	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) { // first time this device is opened
		bdev->bd_disk = disk; // link the block_device to the gendisk
		bdev->bd_contains = bdev;
		if (!partno) { // partno == 0 means the whole device is opened, not a partition; in the previous article every MTD partition registered its own gendisk, so partno is always 0
			struct backing_dev_info *bdi;
 
			ret = -ENXIO;
			bdev->bd_part = disk_get_part(disk, partno); // get the partition descriptor from the gendisk
			if (!bdev->bd_part)
				goto out_clear;
 -------------------------------------------------------------------------(2.2)
			if (disk->fops->open) { // the gendisk provides an open method
				ret = disk->fops->open(bdev, mode); // call it to perform the device-specific open
				if (ret == -ERESTARTSYS) {
					/* Lost a race with 'disk' being
					 * deleted, try again.
					 * See md.c
					 */
					disk_put_part(bdev->bd_part);
					bdev->bd_part = NULL;
					module_put(disk->fops->owner);
					put_disk(disk);
					bdev->bd_disk = NULL;
					mutex_unlock(&bdev->bd_mutex);
					goto restart;
				}
				if (ret)
					goto out_clear;
			}
			if (!bdev->bd_openers) {
				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); // take the capacity from the gendisk and set it on the block_device
				bdi = blk_get_backing_dev_info(bdev);
				if (bdi == NULL)
					bdi = &default_backing_dev_info;
				bdev->bd_inode->i_data.backing_dev_info = bdi;
			}
			// if partition changes on the device invalidated the kernel's partition info, rescan the partitions
			if (bdev->bd_invalidated)
				rescan_partitions(disk, bdev);
		} else { // a partition is being opened
			struct block_device *whole;
			whole = bdget_disk(disk, 0); // get the block_device instance of the whole device
			ret = -ENOMEM;
			if (!whole)
				goto out_clear;
			BUG_ON(for_part);
			ret = __blkdev_get(whole, mode, 1);
			if (ret)
				goto out_clear;
			bdev->bd_contains = whole; // point the partition's bd_contains at the whole device
			bdev->bd_inode->i_data.backing_dev_info =
			   whole->bd_inode->i_data.backing_dev_info;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
				ret = -ENXIO;
				goto out_clear;
			}
			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
		}
	} else { // not the first open
		module_put(disk->fops->owner);
		put_disk(disk);
		disk = NULL;
		if (bdev->bd_contains == bdev) { // the whole device is being opened
			if (bdev->bd_disk->fops->open) {
				ret = bdev->bd_disk->fops->open(bdev, mode); // call the registered open method
				if (ret)
					goto out_unlock_bdev;
			}
			if (bdev->bd_invalidated)
				rescan_partitions(bdev->bd_disk, bdev);
		}
	}
	bdev->bd_openers++; // increment the open count
	if (for_part) // if this is a partition, also bump the partition count
		bdev->bd_part_count++;
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();
	return 0;
 
。。。。。。。。。。。。。。。。。。。。。。。。
}

(2.1) Obtain the gendisk structure from the device number and partition number. The device number bdev->bd_dev was taken from the block device node's inode when the block_device was created, as described above.

struct gendisk *get_gendisk(dev_t devt, int *partno)
{
	struct gendisk *disk = NULL;

	if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
		struct kobject *kobj;

		kobj = kobj_lookup(bdev_map, devt, partno);
		if (kobj)
                    disk = dev_to_disk(kobj_to_dev(kobj));
	。。。。。。。。。。。。。。。。。。。。。。
	return disk;
}

Block devices are all registered in bdev_map. Recall from the previous article how the block device was registered:

add_disk is called for every MTD partition:

void add_disk(struct gendisk *disk)
{
	。。。。。。。。。。。。。。。。。。。

	blk_register_region(disk_devt(disk), disk->minors, NULL,
			    exact_match, exact_lock, disk);
	register_disk(disk);
	blk_register_queue(disk);
。。。。。。。。。。。。。。。。。。。。。。。。。。
}

Of these, blk_register_region registers the gendisk with the bdev_map structure so that it can later be found quickly by device number:

void blk_register_region(dev_t devt, unsigned long range, struct module *module,
			 struct kobject *(*probe)(dev_t, int *, void *),
			 int (*lock)(dev_t, void *), void *data)
{
	kobj_map(bdev_map, devt, range, module, probe, lock, data);
}

This is much like character device registration, which was analyzed in this article:

https://blog.csdn.net/oqqYuJi12345678/article/details/103102159

So, coming back to kobj_lookup above: given the device number and partition number it quickly finds the registered kobject. That kobject is embedded in a struct device, the struct device is part0.__dev embedded in the gendisk, and so in the end we get back the registered gendisk.
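
For reference, the probe callback installed by blk_register_region and the kobject-to-gendisk conversion look roughly like this (a hedged sketch based on block/genhd.c of this kernel generation):

/* probe callback registered via blk_register_region(): the data pointer is the
 * gendisk itself, so the lookup just returns the kobject embedded in part0.__dev */
static struct kobject *exact_match(dev_t devt, int *partno, void *data)
{
	struct gendisk *p = data;

	return &disk_to_dev(p)->kobj;
}

/* and going back from that embedded struct device to the gendisk: */
#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)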

(2.2) The gendisk carries an fops operation set. Again recalling the previous article, every MTD partition that gets registered goes through the following function:

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_block_ops;
。。。。。。。。。。。。。。。。。。。。。。。。。。。。
}
static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

So the open function invoked above is blktrans_open:

static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	。。。。。。。。。。。。。。。。。。。

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

。。。。。。。。。。。。。。。。。。。。。。。
}
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;
	dev = disk->private_data;
。。。。。。。。。。。。。。。。。。。。。。
	return dev;
}

mtd_blktrans_dev is taken from disk->private_data. Which mtd_blktrans_dev is that? As described in the previous article, when the MTD block device is registered, the block device is added through a notifier chain whose callback is blktrans_notify_add:

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

The mtd_blktrans_ops here is mtdblock_tr:

static struct mtd_blktrans_ops mtdblock_tr = {
	.name		= "mtdblock",
	.major		= 31,
	.part_bits	= 0,
	.blksize 	= 512,
	.open		= mtdblock_open,
	.flush		= mtdblock_flush,
	.release	= mtdblock_release,
	.readsect	= mtdblock_readsect,
	.writesect	= mtdblock_writesect,
	.add_mtd	= mtdblock_add_mtd,
	.remove_dev	= mtdblock_remove_dev,
	.owner		= THIS_MODULE,
};

So mtdblock_add_mtd is called next:

static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mbd.mtd = mtd; // associate the mtdblk_dev with this partition's mtd_info
	dev->mbd.devnum = mtd->index;

	dev->mbd.size = mtd->size >> 9;
	dev->mbd.tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		dev->mbd.readonly = 1;

	if (add_mtd_blktrans_dev(&dev->mbd))
		kfree(dev);
}

So dev->tr above is mtdblock_tr, and dev->tr->open is mtdblock_open. How, then, does the mtd_blktrans_dev get tied to the gendisk? Let's continue with add_mtd_blktrans_dev:

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	。。。。。。。。。。。。。。。。。
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_block_ops;
。。。。。。。。。。。。。。。。。。。。。。。。。。。
//initialize the request_queue; its request_fn is set to mtd_blktrans_request
//and its make_request_fn to blk_queue_bio
    new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	new->rq->queuedata = new; // store the mtd_blktrans_dev in queuedata; used later when handling I/O
	blk_queue_logical_block_size(new->rq, tr->blksize);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);

	if (tr->discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
		new->rq->limits.max_discard_sectors = UINT_MAX;
	}

	gd->queue = new->rq; // attach the request_queue to the gendisk; used later when handling I/O
/* Create processing workqueue */
	new->wq = alloc_workqueue("%s%d", 0, 0,
				  tr->name, new->mtd->index);
	if (!new->wq)
		goto error4;
	INIT_WORK(&new->work, mtd_blktrans_work); // this work item will later process the read/write requests

	。。。。。。。。。。。。。。。。。。。。。。
}

So in the end the mtd_blktrans_dev and the gendisk are linked through gd->private_data.
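
To keep track of what has been wired up so far, the assignments shown above can be summarized like this (just a recap of the code already quoted, not new logic):

/*
 *   bdev->bd_disk           = gendisk                 (__blkdev_get)
 *   gendisk->private_data   = &mtdblk_dev->mbd        (add_mtd_blktrans_dev)
 *   gendisk->queue          = mtd_blktrans_dev->rq    (add_mtd_blktrans_dev)
 *   new->rq->queuedata      = mtd_blktrans_dev        (add_mtd_blktrans_dev)
 *   mtdblk_dev->mbd.mtd     = mtd_info                (mtdblock_add_mtd)
 */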

2.2 Allocating the super_block

struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;
。。。。。。。。。。。。。。。。。。。。。。。
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}
		
	err = set(s, data);
	。。。。。。。。。。。。。。。。。。。。。
	return s;
}

Here set(s, data) is set_bdev_super:

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data; // data is the block_device allocated earlier
	s->s_dev = s->s_bdev->bd_dev; // record the block device number

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

2.3 Filling in the superblock: ext2_fill_super

Filling in the ext2 superblock requires reading data from flash to initialize it, so this is where the relationship between the upper filesystem layer and the lower driver layer really shows. The same function also performs the other important part of the mount: creating the dentry and inode for the filesystem's root directory:

static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
    struct buffer_head * bh;
    struct ext2_sb_info * sbi;
    struct ext2_super_block * es;
    struct inode *root;
    unsigned long sb_block = get_sb_block(&data);
    unsigned long logic_sb_block;
    unsigned long offset = 0;
    int blocksize = BLOCK_SIZE;
    int db_count;
 
 
    sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); // allocate the ext2_sb_info
    if (!sbi)
        goto failed;
 
 
    sb->s_fs_info = sbi; // the VFS super_block is linked to the ext2_sb_info via sb->s_fs_info
    sbi->s_sb_block = sb_block;
 
 
    blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
    ......
----------------------------------------------------------------(2.3.1)
    if (!(bh = sb_bread(sb, logic_sb_block))) { // read the raw on-disk superblock (ext2_super_block) from the device
        ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
        goto failed_sbi;
    }
    es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
    sbi->s_es = es;
    ......
    sb->s_magic = le16_to_cpu(es->s_magic);
    blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
 
/* If the actual block size recorded in the superblock differs from the assumed one,
   reread the superblock: the superblock occupies one block and sb_bread reads exactly
   one block at the given block number, so with the wrong block size we must reread
   using the correct size. */
    if (sb->s_blocksize != blocksize) { 
        brelse(bh);
 
 
        if (!sb_set_blocksize(sb, blocksize)) {
            ext2_msg(sb, KERN_ERR,
                "error: bad blocksize %d", blocksize);
            goto failed_sbi;
        }
        logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
        offset = (sb_block*BLOCK_SIZE) % blocksize;
        bh = sb_bread(sb, logic_sb_block);
        es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
        sbi->s_es = es;
    }
    ......
    sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
 
 
    sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
    sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
    sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
 
 
    sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
    sbi->s_itb_per_group = sbi->s_inodes_per_group /
                    sbi->s_inodes_per_block;
    sbi->s_desc_per_block = sb->s_blocksize /
                    sizeof (struct ext2_group_desc);
    sbi->s_sbh = bh; // s_sbh points at the raw superblock buffer
    sbi->s_mount_state = le16_to_cpu(es->s_state);
    sbi->s_addr_per_block_bits =
        ilog2 (EXT2_ADDR_PER_BLOCK(sb));
    sbi->s_desc_per_block_bits =
        ilog2 (EXT2_DESC_PER_BLOCK(sb));
    ......
    sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
                le32_to_cpu(es->s_first_data_block) - 1)
                    / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
    db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
           EXT2_DESC_PER_BLOCK(sb);
    sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
    ......
    for (i = 0; i < db_count; i++) { // read all the group descriptors
        block = descriptor_loc(sb, logic_sb_block, i);
        sbi->s_group_desc[i] = sb_bread(sb, block);
        if (!sbi->s_group_desc[i]) {
            for (j = 0; j < i; j++)
                brelse (sbi->s_group_desc[j]);
            ext2_msg(sb, KERN_ERR,
                "error: unable to read group descriptors");
            goto failed_mount_group_desc;
        }
    }
    sbi->s_gdb_count = db_count; // number of blocks occupied by the group descriptors
......
/* initialize the reservation (preallocation) window */
    sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
    sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
    sbi->s_rsv_window_head.rsv_alloc_hit = 0;
    sbi->s_rsv_window_head.rsv_goal_size = 0;
    ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
	......
sb->s_op = &ext2_sops; // set the super_operations
	......
-----------------------------------------------------------(2.3.2)
    root = ext2_iget(sb, EXT2_ROOT_INO); 
    if (IS_ERR(root)) {
        ret = PTR_ERR(root);
        goto failed_mount3;
    }
 
 -------------------------------------------------------------(2.3.3)
    sb->s_root = d_make_root(root); // create the dentry for the root directory
 
 
    ......
    ext2_write_super(sb);
    ......

(2.3.1) The first block of the ext2 filesystem is read from the flash partition to initialize the filesystem's super_block. The sb_bread function is key: once you understand it, you understand the whole path for reading data from flash. The buffer_head structure holds the data read from flash, and the upper filesystem layer uses it to interact with the MTD layer.

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread(sb->s_bdev, block, sb->s_blocksize); // block is the block number to read
}
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
------------------------------------------------------(2.3.1.1)
	struct buffer_head *bh = __getblk(bdev, block, size); // first check whether a buffer_head for this block already exists in the cache

	if (likely(bh) && !buffer_uptodate(bh))
--------------------------------------------------------(2.3.1.2)
		bh = __bread_slow(bh); // the buffer_head's data must be read from flash
	return bh;
}
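
As a usage note, a filesystem typically drives this path with a pattern like the following hedged sketch (error handling trimmed; blkno and out are just placeholder names):

	struct buffer_head *bh;

	bh = sb_bread(sb, blkno);      /* read one filesystem block into a buffer_head */
	if (!bh)
		return -EIO;
	/* bh->b_data now holds sb->s_blocksize bytes read from the device */
	memcpy(out, bh->b_data, sb->s_blocksize);
	brelse(bh);                    /* drop the reference taken by sb_bread */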

(2.3.1.1)

struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size); // look in the cache first

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size); // if not found, allocate the page and buffers
	return bh;
}

__getblk

     ----------->__find_get_block

struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size); // first check the per-CPU bh_lrus.bhs LRU; on a first read it will not be there

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block); // then look the block up in the address_space radix tree (page cache); returns NULL if it is not cached yet
		if (bh)
			bh_lru_install(bh); // install it into the bh_lrus.bhs LRU
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}

Let's look at __find_get_block_slow:

static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index); // look the page up in address_space->page_tree (the block device's page cache)
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page); // if the page has buffers attached, walk them looking for the matching buffer_head
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
}
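
A quick worked example of the index calculation above, assuming 4 KiB pages (PAGE_CACHE_SHIFT = 12) and a 1 KiB filesystem block size (bd_inode->i_blkbits = 10): index = block >> 2, so block 10 falls into page-cache page 2, and once buffers are attached that page holds blocks 8-11 as four 1 KiB buffer_heads chained through b_this_page.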

Assuming this is the first time through, the page is not yet present in address_space->page_tree, so the next step is taken:

__getblk

     ----------->__getblk_slow

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size); // on the first pass this will not find anything
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size); // allocate a page plus buffers and insert them into address_space->page_tree
。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。。
	}
}

__getblk

     ----------->__getblk_slow

          ---------------->grow_dev_page

static int
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size, int sizebits)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;		/* Will call free_more_memory() */
// find or create a page and insert it into the address_space->page_tree radix tree
	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return ret;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						index << sizebits, size);
			goto done;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
// size is the size of each bh: the page is carved into multiple buffer_heads of that size and they are chained together
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, index << sizebits, size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	page_cache_release(page);
	return ret;
}

From now on, __find_get_block can hand us the buffer_head.

(2.3.1.2) With the buffer_head in hand, __bread_slow reads the data into it:

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	。。。。。。。。。。。。。。。。。。。。。。。。
		bh->b_end_io = end_buffer_read_sync; // completion handler run once the read finishes
		submit_bh(READ, bh); // submit the read
		wait_on_buffer(bh); // wait for the read to complete
		if (buffer_uptodate(bh))
			return bh;
。。。。。。。。。。。。。。。。。。
}

submit_bh

   -------------->_submit_bh

A bio is the data structure the kernel uses for communication between the filesystem layer and the block layer (a little like what sk_buff is to networking).

int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1); // allocate a bio

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev; // attach the block_device to the bio
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync; // completion callback for the I/O
	bio->bi_private = bh;
	bio->bi_flags |= bio_flags;

	/* Take care of bh's that straddle the end of the device */
	guard_bh_eod(rw, bio, bh);

	if (buffer_meta(bh))
		rw |= REQ_META;
	if (buffer_prio(bh))
		rw |= REQ_PRIO;

	bio_get(bio);
	submit_bio(rw, bio); // submit the read/write request

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

submit_bio

     -------------->generic_make_request

How does a bio become a request? If several bios address contiguous regions, they are gathered into one request (one request carrying several contiguous bios, the usual "bio merging"); if a bio is not contiguous with any other, a new request is created and the bio is attached to it on its own. Merging has a limit: once the combined size of the contiguous bios exceeds a threshold (set in /sys/block/xxx/queue/max_sectors_kb), no more bios are merged into that request.

void generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;

	if (current->bio_list) {
		bio_list_add(current->bio_list, bio);
		return;
	}


	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev); // the block_device was linked to the partition's gendisk earlier, so this returns that gendisk's request_queue

		q->make_request_fn(q, bio); // hand the bio to the queue; from the gendisk registration
// above we know make_request_fn is blk_queue_bio

		bio = bio_list_pop(current->bio_list);
	} while (bio);
	current->bio_list = NULL; /* deactivate */
}

blk_queue_bio contains the bio-merging logic. It is not the focus of this article, so we only skim it:

static void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
    const bool sync = !!(bio->bi_rw & REQ_SYNC);
    struct blk_plug *plug;
    int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
    struct request *req;
    unsigned int request_count = 0;

    /*
     * low level driver can indicate that it wants pages above a
     * certain limit bounced to low memory (ie for highmem, or even
     * ISA dma in theory)
     */
/* set up a bounce buffer in case some pages are unsuitable for this I/O (e.g. highmem or ISA DMA) */
    blk_queue_bounce(q, &bio);                    

    if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {    // data integrity check
        bio_endio(bio, -EIO);
        return;
    }

    if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
        spin_lock_irq(q->queue_lock);
        where = ELEVATOR_INSERT_FLUSH;
        goto get_rq;
    }

    /*
     * Check if we can merge with the plugged list before grabbing
     * any locks.
     */
    if (!blk_queue_nomerges(q) &&                  // unless the queue forbids merging,
        blk_attempt_plug_merge(q, bio, &request_count))    // try to merge the bio into the current task's plugged request list
        return;

    spin_lock_irq(q->queue_lock);

    el_ret = elv_merge(q, &req, bio);              // elv_merge is the core: find a request this bio can be front- or back-merged into
    if (el_ret == ELEVATOR_BACK_MERGE) {            // back merge
        if (bio_attempt_back_merge(q, req, bio)) {
            elv_bio_merged(q, req, bio);
            if (!attempt_back_merge(q, req))
                elv_merged_request(q, req, el_ret);
            goto out_unlock;
        }
    } else if (el_ret == ELEVATOR_FRONT_MERGE) {      // front merge
        if (bio_attempt_front_merge(q, req, bio)) {
            elv_bio_merged(q, req, bio);
            if (!attempt_front_merge(q, req))
                elv_merged_request(q, req, el_ret);
            goto out_unlock;
        }
    }
/* no existing request could be merged with */
get_rq:
    /*
     * This sync check and mask will be re-done in init_request_from_bio(),
     * but we need to set it earlier to expose the sync flag to the
     * rq allocator and io schedulers.
     */
    rw_flags = bio_data_dir(bio);
    if (sync)
        rw_flags |= REQ_SYNC;

    /*
     * Grab a free request. This is might sleep but can not fail.
     * Returns with the queue unlocked.
     */
    req = get_request(q, rw_flags, bio, GFP_NOIO);          // get an empty request
    if (IS_ERR(req)) {
        bio_endio(bio, PTR_ERR(req));    /* @q is dead */
        goto out_unlock;
    }

    /*
     * After dropping the lock and possibly sleeping here, our request
     * may now be mergeable after it had proven unmergeable (above).
     * We don't worry about that case for efficiency. It won't happen
     * often, and the elevators are able to handle it.
     */
    init_request_from_bio(req, bio);                  // initialize the request from the bio

    if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
        req->cpu = raw_smp_processor_id();

    plug = current->plug;
    if (plug) {
        /*
         * If this is the first request added after a plug, fire
         * of a plug trace.
         */
        if (!request_count)
            trace_block_plug(q);
        else {
            if (request_count >= BLK_MAX_REQUEST_COUNT) {
                blk_flush_plug_list(plug, false);            // request count hit the limit, flush (unplug) the plug list
                trace_block_plug(q);
            }
        }
        list_add_tail(&req->queuelist, &plug->list);          // add the request to the plug list
        blk_account_io_start(req, true);
    } else {
        spin_lock_irq(q->queue_lock);
        add_acct_request(q, req, where);
        __blk_run_queue(q);
out_unlock:
        spin_unlock_irq(q->queue_lock);
    }
}

Each flash read or write corresponds to a request. init_request_from_bio is called first to attach the bio to the request; assuming plugging is not used, add_acct_request then puts the request onto the request_queue, and __blk_run_queue is called.

__blk_run_queue

    --------------->__blk_run_queue_uncond

inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
	 * the queue lock internally. As a result multiple threads may be
	 * running such a request function concurrently. Keep track of the
	 * number of active request_fn invocations such that blk_drain_queue()
	 * can wait until all these request_fn calls have finished.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}

The key call is request_fn, which, as we know from the gendisk registration above, is mtd_blktrans_request:

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata; // set to the mtd_blktrans_dev at registration time

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
	else
		queue_work(dev->wq, &dev->work); // the work function was set to mtd_blktrans_work at registration time
}

So the read/write request ends up being handled by a workqueue, and the work function is mtd_blktrans_work:

static void mtd_blktrans_work(struct work_struct *work)
{
	struct mtd_blktrans_dev *dev =
		container_of(work, struct mtd_blktrans_dev, work);
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;
	int background_done = 0;

	spin_lock_irq(rq->queue_lock);

	while (1) {
		int res;

		dev->bg_stop = false;
// fetch the next request to process from the queue
		if (!req && !(req = blk_fetch_request(rq))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(rq->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(rq->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
// process the request itself
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;

		background_done = 0;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);
}

Now let's look at do_blktrans_request:

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;
。。。。。。。。。。。。。。。。。。。。。。。。。。。。

	switch(rq_data_dir(req)) {
	case READ: // a read calls the mtd_blktrans_ops readsect, which we know from above is mtdblock_readsect
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
。。。。。。。。。。。。。。。。。。。。
}
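
A quick worked example of the arithmetic above: blk_rq_pos() is expressed in 512-byte sectors, and mtdblock_tr declares blksize = 512, so blkshift is 9 (assuming blkshift is derived as log2(blksize) when the translator is registered). Then block = (sector << 9) >> 9 = sector and nsect = bytes / 512, i.e. the "sectors" handed to readsect/writesect here are exactly the 512-byte blocks declared by mtdblock_tr.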

So mtdblock_readsect is called next:

static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
// as set up at registration, the mtd_blktrans_dev is embedded in a mtdblk_dev, from which all of the partition's information can be reached
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	return do_cached_read(mtdblk, block<<9, 512, buf);
}
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd; // the mtd_info was stored here at registration time
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
			mtd->name, pos, len);

	if (!sect_size)
		return mtd_read(mtd, pos, len, &retlen, buf); // read the data; mtd_read calls mtd->_read

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached
		 * Read the requested amount of data from our internal cache if it
		 * contains what we want, otherwise we read the data directly
		 * from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy (buf, mtdblk->cache_data + offset, size); // copy the data from the internal cache into the buffer
		} else {
			ret = mtd_read(mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

The essential call in mtd_read is mtd->_read. So which read function is that? When the MTD driver was registered in the previous article, the following was done for each partition:

s3c2410_nand_add_partition

     ----------------->mtd_device_parse_register

            -------------------->add_mtd_partitions

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
。。。。。。。。。。。。

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

。。。。。。。。。。。。。。。。。。
}

So the function is part_read:

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
。。。。。。。。。。。。。。。。。。。。。。。。。
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	。。。。。。。。。。。。。。。。。。
}
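
A quick worked example of the offset translation above: if this partition starts at offset 0x200000 on the master device (an assumed layout, purely for illustration), then a read at from = 0x1000 within the partition becomes a read at 0x201000 on the master; part->offset is what keeps each partition confined to its own region of the flash.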

In the previous article the flash was split into 4 partitions, each pointing to a common master; the partitions were registered individually, but some shared operations still have to go through the master. Back to the driver registration of the previous article:

s3c24xx_nand_probe

     ------------------->nand_scan_tail

nand_scan_tail initializes the master's information:

int nand_scan_tail(struct mtd_info *mtd)
{
	int i;
	struct nand_chip *chip = mtd->priv;

。。。。。。。。。。。。。。。。。。
	mtd->_read = nand_read;
。。。。。。。。。。。。。。。。。。。
}

So the call chain then reaches nand_read:

nand_read

    ------------------>nand_do_read_ops

static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct nand_chip *chip = mtd->priv; // get the nand_chip this partition points at; s3c2410_nand_init_chip stored the chip in mtd->priv
	struct mtd_ecc_stats stats;
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
		mtd->oobavail : mtd->oobsize;

	uint8_t *bufpoi, *oob, *buf;
	unsigned int max_bitflips = 0;

	stats = mtd->ecc_stats;

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/* Is the current page in the buffer? */
		if (realpage != chip->pagebuf || oob) {
			bufpoi = aligned ? buf : chip->buffers->databuf;

			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

			/*
			 * Now read the page into the buffer.  Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(mtd, chip,
							col, bytes, bufpoi);
			else
				ret = chip->ecc.read_page(mtd, chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (!aligned)
					/* Invalidate page cache */
					chip->pagebuf = -1;
				break;
			}

			max_bitflips = max_t(unsigned int, max_bitflips, ret);

			/* Transfer not aligned data */
			if (!aligned) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagebuf = realpage;
					chip->pagebuf_bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagebuf = -1;
				}
				memcpy(buf, chip->buffers->databuf + col, bytes);
			}

			buf += bytes;

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip,
						oob, ops, toread);
					oobreadlen -= toread;
				}
			}

			if (chip->options & NAND_NEED_READRDY) {
				/* Apply delay or wait for ready/busy pin */
				if (!chip->dev_ready)
					udelay(chip->chip_delay);
				else
					nand_wait_ready(mtd);
			}
		} else {
			memcpy(buf, chip->buffers->databuf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagebuf_bitflips);
		}

		readlen -= bytes;

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}
	chip->select_chip(mtd, -1);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}

As you can see, through the MTD layer we finally reach the actual driver layer, and the flash-chip operations provided by the nand_chip are used to read the data. Once the data has been read, control returns to the mtd_blktrans_work worker above, which signals that the read has completed:

__blk_end_request_cur eventually calls down to blk_update_request:

blk_update_request

    ---------------->req_bio_endio

static void req_bio_endio(struct request *rq, struct bio *bio,
              unsigned int nbytes, int error)
{
    ......
    ......
 
 
    bio->bi_size -= nbytes;        // update the bio's remaining length and starting sector
    bio->bi_sector += (nbytes >> 9);
 
 
    if (bio_integrity(bio))        
        bio_integrity_advance(bio, nbytes);
            // I have not studied the integrity-data functions and structures,
            // but bio_integrity_advance only operates on bio->bi_integrity
            // and does not change the bio's other important fields
 
 
    /* don't actually finish bio if it's part of flush sequence */
    if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
        bio_endio(bio, error);    // if the bio is complete (size == 0), call bio_endio
}

bio_endio mainly invokes bio->bi_end_io, which here is end_bio_bh_io_sync:

static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;
。。。。。。。。。。。。。
	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
。。。。。。。。。。。。。
}

b_end_io is end_buffer_read_sync:

end_buffer_read_sync

     -------------->__end_buffer_read_notouch

            -------------------->unlock_buffer

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

unlock_buffer wakes the process that was waiting on wait_on_buffer inside __bread_slow above. With that, the flash read/write path is complete.

(2.3.2) Initialize an inode for the filesystem's root directory.

(2.3.3) Initialize a dentry for the filesystem's root directory.

Finally, a summary of the whole flow described above:
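
The read path, as traced in this article, as a call chain:

ext2_fill_super
     ----------->sb_bread / __bread
          ----------->__getblk / __find_get_block      (find or create the buffer_head in the page cache)
          ----------->__bread_slow
               ----------->submit_bh                   (wrap the buffer_head in a bio)
                    ----------->submit_bio / generic_make_request
                         ----------->blk_queue_bio     (merge bios into requests)
                              ----------->__blk_run_queue
                                   ----------->mtd_blktrans_request   (request_fn)
                                        ----------->mtd_blktrans_work (workqueue)
                                             ----------->do_blktrans_request
                                                  ----------->mtdblock_readsect / do_cached_read
                                                       ----------->mtd_read -> part_read -> nand_read
                                                            ----------->nand_do_read_ops   (drives the NAND chip)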
