To study how the OMFS filesystem performs a file write, I wrote a small user-space program:
Related reading:
Directory creation and deletion in the Linux filesystem OMFS: http://www.linuxidc.com/Linux/2012-02/54025.htm
Regular file creation and deletion in the Linux filesystem OMFS: http://www.linuxidc.com/Linux/2012-02/54026.htm
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
int main(void)
{
	int i;
	char w[16384];
	int fd;
	int count;

	fd = open("/mnt/point1/cccc", O_RDWR);
	perror("open");

	for (i = 0; i < 16384; i++)
		w[i] = 0x99;

	lseek(fd, 0, SEEK_END);
	count = write(fd, w, 16384);
	perror("write");
	printf("count: %d\n", count);

	return 0;
}
The purpose of this program is simply to write to a file under /mnt/point1/, the directory on which our OMFS filesystem is mounted.
The current contents of that directory are as follows:
ls -ali
Next we write 16384 bytes to the file cccc, each byte being 0x99.
Set a breakpoint on omfs_get_block.
The call stack looks like this:
#0 omfs_get_block (inode=0xddf2c870, block=0, bh_result=0xddfd6818, create=1)
at fs/omfs/file.c:230
#1 0xc02c1c5f in __block_prepare_write (inode=0xddf2c870, page=0xc10d1020,
from=0, to=4096, get_block=0xe278d2ca <omfs_get_block>) at fs/buffer.c:1880
#2 0xc02c1f9d in block_write_begin (file=0xdd046480, mapping=0xddf2c920,
pos=0, len=4096, flags=0, pagep=0xddbb7d80, fsdata=0xddbb7d6c,
get_block=0xe278d2ca <omfs_get_block>) at fs/buffer.c:1995
#3 0xe278d642 in omfs_write_begin (file=0xdd046480, mapping=0xddf2c920,
pos=0, len=4096, flags=0, pagep=0xddbb7d80, fsdata=0xddbb7d6c)
at fs/omfs/file.c:316
#4 0xc0231a32 in generic_perform_write (file=0xdd046480, i=0xddbb7dc0, pos=0)
at mm/filemap.c:2236
#5 0xc0231bd9 in generic_file_buffered_write (iocb=0xddbb7ec4,
iov=0xddbb7f34, nr_segs=1, pos=0, ppos=0xddbb7ef8, count=16384, written=0)
at mm/filemap.c:2292
#6 0xc0231f99 in __generic_file_aio_write (iocb=0xddbb7ec4, iov=0xddbb7f34,
nr_segs=1, ppos=0xddbb7ef8) at mm/filemap.c:2410
#7 0xc023204a in generic_file_aio_write (iocb=0xddbb7ec4, iov=0xddbb7f34,
nr_segs=1, pos=0) at mm/filemap.c:2440
#8 0xc02902cf in do_sync_write (filp=0xdd046480,
buf=0xbf90ab7c "\231\231\231\231"..., len=16384, ppos=0xddbb7f94)
at fs/read_write.c:320
#9 0xc02903fc in vfs_write (file=0xdd046480,
buf=0xbf90ab7c "\231\231\231\231"..., count=16384, pos=0xddbb7f94)
at fs/read_write.c:349
#10 0xc0290557 in sys_write (fd=3,
buf=0xbf90ab7c "\231\231\231\231"..., count=16384)
at fs/read_write.c:401
#11 0xc0104657 in ?? () at arch/x86/kernel/entry_32.S:457
(gdb) p max_blocks
$61 = 1
(gdb) p bh_result->b_size
$62 = 2048
(gdb) p inode->i_blkbits
$63 = 11
Because the inode number of cccc is 18, the block that sb_bread reads is 72 = 18 × 4.
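As a quick side check, the same numbers can be reproduced in a few lines of userspace C. This is my own arithmetic, not kernel code; the factor of 4 is assumed to be the ratio between the 8192-byte OMFS block and the 2048-byte device block implied by i_blkbits = 11 in the trace above.

#include <stdio.h>

int main(void)
{
	unsigned long long ino          = 18;   /* inode number of cccc                 */
	unsigned long long blks_per_fsb = 4;    /* assumed: 8192-byte OMFS block / 2048 */
	unsigned long long dev_blk_size = 2048; /* 1 << i_blkbits                       */

	unsigned long long dev_block = ino * blks_per_fsb;        /* 72      */
	unsigned long long offset    = dev_block * dev_blk_size;  /* 0x24000 */

	printf("sb_bread block %llu, byte offset 0x%llx\n", dev_block, offset);
	return 0;
}

The resulting 0x24000 offset matches the cccc entry in the on-disk layout listed further down.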
Then oe is obtained: oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
This oe is still in the initial state set up by omfs_create.
max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
The resulting max_extents is 98; the formula is:
(sbi->s_sys_blocksize - offset -
        sizeof(struct omfs_extent)) /
        sizeof(struct omfs_extent_entry) + 1;
(2048 - 0x1d0 - 32) / 16 + 1 = 98
From these numbers we can work out the maximum file size: 98 × 8192 × 8 = 6422528 bytes = 6.125 MB.
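The same arithmetic can be written out as a small userspace C check. The struct sizes and the 0x1d0 offset are taken from the calculation above, not re-derived from the kernel headers:

#include <stdio.h>

int main(void)
{
	unsigned int sys_blocksize = 2048;   /* sbi->s_sys_blocksize                      */
	unsigned int start_offset  = 0x1d0;  /* OMFS_EXTENT_START                         */
	unsigned int extent_hdr    = 32;     /* sizeof(struct omfs_extent), assumed       */
	unsigned int entry_size    = 16;     /* sizeof(struct omfs_extent_entry), assumed */

	unsigned int max_extents = (sys_blocksize - start_offset - extent_hdr)
				   / entry_size + 1;                    /* 98 */

	/* maximum file size, using the 8192 x 8 bytes per extent entry estimated above */
	unsigned long long max_size = (unsigned long long)max_extents * 8192 * 8;

	printf("max_extents = %u, max file size = %llu bytes\n",
	       max_extents, max_size);      /* 98, 6422528 */
	return 0;
}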
next = inode->i_ino;	// next is now 18
Next comes a loop:
	for (;;) {
		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))	// sanity check
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);	// 1 here, set by omfs_create
		next = be64_to_cpu(oe->e_next);			// all ones (~0) here, also set by omfs_create
		entry = &oe->e_entry;

		if (extent_count > max_extents)
			goto out_brelse;

		offset = find_block(inode, entry, block, extent_count, &remain);
		if (offset > 0) {
			ret = 0;
			map_bh(bh_result, inode->i_sb, offset);
			if (remain > max_blocks)
				remain = max_blocks;
			bh_result->b_size = (remain << inode->i_blkbits);
			goto out_brelse;
		}
		if (next == ~0)
			break;

		brelse(bh);
		bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
What does this loop actually do?
First look at find_block: judging by its name and its arguments, it looks up the requested logical block among the extent entries.
Because extent_count is 1 (only the terminator entry exists), the loop inside find_block never executes and it returns 0 immediately.
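For reference, here is a rough standalone sketch of the kind of lookup find_block performs. It is only an illustration, not the kernel source: the types are simplified, and endianness handling and the cluster-to-block conversion are left out.

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for struct omfs_extent_entry, values already in CPU order */
struct extent_entry {
	uint64_t e_cluster;	/* first block of this run */
	uint64_t e_blocks;	/* number of blocks in run */
};

/*
 * Walk the extent entries; if the requested logical block falls inside one
 * of the runs, return its on-disk block and report how many blocks of that
 * run remain.  With extent_count == 1 only the terminator entry exists, so
 * the loop body never runs and 0 is returned -- which is what we saw above.
 */
static uint64_t find_block_sketch(const struct extent_entry *entry,
				  uint64_t block, int count, int *remain)
{
	uint64_t searched = 0;

	for (; count > 1; count--, entry++) {
		if (block >= searched && block < searched + entry->e_blocks) {
			*remain = (int)(searched + entry->e_blocks - block);
			return entry->e_cluster + (block - searched);
		}
		searched += entry->e_blocks;
	}
	return 0;	/* not mapped yet: omfs_get_block takes the create path */
}

int main(void)
{
	struct extent_entry terminator = { ~0ULL, ~0ULL };
	int remain = 0;

	/* extent_count == 1: only the terminator, so nothing is mapped yet */
	printf("offset = %llu\n",
	       (unsigned long long)find_block_sketch(&terminator, 0, 1, &remain));
	return 0;
}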
Back in omfs_get_block:
since next == ~0, we break out of the loop.
	if (create) {
		ret = omfs_grow_extent(inode, oe, &new_block);
		if (ret == 0) {
			mark_buffer_dirty(bh);
			mark_inode_dirty(inode);
			map_bh(bh_result, inode->i_sb,
					clus_to_blk(sbi, new_block));
		}
	}
omfs_grow_extent is the key function here; let's look inside it.
Its comment reads:
/*
* Add new blocks to the current extent, or create new entries/continuations
* as necessary.
*/
ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
&new_block, &new_count);
new_block = 28, new_count = 8
entry->e_cluster = cpu_to_be64(new_block);
entry->e_blocks = cpu_to_be64((u64) new_count);
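A quick userspace check (my own arithmetic, not kernel output) ties these values to the on-disk layout shown further below: the new entry records cluster 28 with 8 blocks, and at 8192 bytes per block (the granularity used in the layout dump below) that places cccc's data at offset 0x38000.

#include <stdio.h>

int main(void)
{
	unsigned long long new_block = 28;    /* returned by omfs_allocate_range */
	unsigned long long new_count = 8;     /* blocks in the new extent        */
	unsigned long long blk_bytes = 8192;  /* per the layout dump below       */

	printf("cccc data starts at 0x%llx, extent spans 0x%llx bytes\n",
	       new_block * blk_bytes, new_count * blk_bytes);  /* 0x38000, 0x10000 */
	return 0;
}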
Breakpoint 5, omfs_get_block (inode=0xddf2c870, block=1, bh_result=0xddfd67e0,
create=1) at fs/omfs/file.c:230
Breakpoint 5, omfs_get_block (inode=0xddf2c870, block=2, bh_result=0xddfd6888,
create=1) at fs/omfs/file.c:230
Breakpoint 5, omfs_get_block (inode=0xddf2c870, block=3, bh_result=0xddfd6850,
create=1) at fs/omfs/file.c:230
Breakpoint 5, omfs_get_block (inode=0xddf2c870, block=4, bh_result=0xddfd6968,
create=1) at fs/omfs/file.c:230
Breakpoint 5, omfs_get_block (inode=0xddf2c870, block=5, bh_result=0xddfd6850,
create=1) at fs/omfs/file.c:230
Breakpoint 5, omfs_get_block (inode=0xddf2c870, block=6, bh_result=0xddfd6968,
create=1) at fs/omfs/file.c:230
Breakpoint 5, omfs_get_block (inode=0xddf2c870, block=7, bh_result=0xddfd6850,
create=1) at fs/omfs/file.c:230
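The eight breakpoint hits (block 0 in the backtrace above plus blocks 1 through 7 here) line up with the size of the write: assuming the 2048-byte block size implied by i_blkbits == 11, a 16384-byte write spans logical blocks 0 through 7, so omfs_get_block is asked to map each of them once. A trivial check:

#include <stdio.h>

int main(void)
{
	unsigned int write_len  = 16384;     /* bytes written by the test program */
	unsigned int block_size = 1u << 11;  /* inode->i_blkbits == 11 -> 2048    */
	unsigned int nblocks    = write_len / block_size;

	printf("logical blocks touched: %u (blocks 0..%u)\n", nblocks, nblocks - 1);
	return 0;
}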
Now let's look at the omfs_extent information for aaaa. The current on-disk layout is:
0xa000   block 5    bmap
0xc000   block 6    aaaa
0x10000  block 8    bbbb
0x14000  block 10   through 0x23fff (block 17): contents of aaaa
0x24000  block 18   cccc
0x28000  block 20   through 0x2c000 (block 22): more contents of aaaa
Because allocation is done 8 blocks at a time, blocks 22 through 28 are unused but still counted as occupied by aaaa.
0x38000  block 28   through 0x3bff0: contents of cccc
The omfs_extent information for cccc shows:
0x1c = 28, which is exactly where cccc's contents begin.
So far we have seen that OMFS directories are hash-based, file data is allocated by extents, and both the number of files and the maximum file size are limited.
Hashing and extents are perfectly ordinary techniques, so why is this filesystem called OMFS, the Optimized MPEG File System?
Even after reading the code I still don't see how it is optimized for MPEG. Do I really have to go study MPEG itself?