介绍
因为块设备驱动和网络设备驱动实际中用得较少,所以只给出驱动模板,我也没有具体测试,等到实际用到时再研究吧,溜了溜了。
块设备驱动模板
/* Per-device state for the block driver template. */
struct xxx_dev {
	int size;                    /* device size in bytes */
	struct request_queue *queue; /* request queue for this device */
	struct gendisk *gd;          /* kernel's representation of the disk */
	spinlock_t lock;             /* lock handed to blk_init_queue() below */
};	/* fixed: the struct definition was missing its trailing semicolon */
/* Major number; 0 asks register_blkdev() to allocate one dynamically. */
static int xxx_major;
module_param(xxx_major, int, 0);
/* Hardware sector size in bytes — placeholder, fill in a real value. */
#define HARDSECT_SIZE xxx
/* Total number of hardware sectors on the device — placeholder. */
#define NSECTORS xxx
/* Minor numbers (partitions) per disk.
 * NOTE(review): the value is missing here — must be filled in before use. */
#define xxx_MINORS
static void xxx_disk_transfer(struct vmem_disk_dev *dev, unsigned long sector,
unsigned long nsect, char *buffer, int write)
{
unsigned long offset = sector*KERNEL_SECTOR_SIZE;
unsigned long nbytes = nsect*KERNEL_SECTOR_SIZE;
if ((offset + nbytes) > dev->size) {
printk (KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
return;
}
if (write)
...
else
...
}
/*
 * Walk every segment of @bio and hand each one to xxx_disk_transfer().
 * Always returns 0; transfer errors are only logged inside the helper.
 */
static int gen_xfer_bio(struct xxx_dev *dev, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	sector_t sector = bio->bi_iter.bi_sector;	/* first sector of this bio */

	bio_for_each_segment(bvec, bio, iter) {
		/* Map the current segment so the CPU can address its page. */
		char *buffer = __bio_kmap_atomic(bio, iter);
		/* >> 9 converts a byte count into 512-byte kernel sectors. */
		xxx_disk_transfer(dev, sector, bio_cur_bytes(bio) >> 9,
			buffer, bio_data_dir(bio) == WRITE);
		sector += bio_cur_bytes(bio) >> 9;
		__bio_kunmap_atomic(buffer);
		/* NOTE(review): bio_cur_bytes(bio) reads the bio's own iterator,
		 * not the local `iter` advanced by this loop — confirm whether
		 * the per-segment length (bvec.bv_len) was intended here. */
	}
	return 0;
}
/*
 * "make_request" entry point: handles each bio directly, bypassing the
 * request queue — suitable for RAM-backed devices with no seek penalty.
 */
static void xxx_make_request(struct request_queue *q, struct bio *bio)
{
	struct xxx_dev *dev = q->queuedata;	/* set in the init code below */
	int status;

	status = gen_xfer_bio(dev, bio);
	/* The bio completion API changed in 4.3: the error code moved into
	 * bio->bi_error and bio_endio() lost its second argument. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)
	bio->bi_error = status;
	bio_endio(bio);
#else
	bio_endio(bio, status);
#endif
}
/*
 * Request-function for the classic request-queue mode (blk_init_queue):
 * pull requests off the queue, fail non-filesystem requests, and transfer
 * every bio of each filesystem request.
 *
 * Fixed: this function was named vmem_disk_request (copy-paste leftover)
 * while the init code registers it as xxx_request via blk_init_queue().
 */
static void xxx_request(struct request_queue *q)
{
	struct request *req;
	struct bio *bio;

	while ((req = blk_peek_request(q)) != NULL) {
		struct xxx_dev *dev = req->rq_disk->private_data;

		/* Only filesystem requests carry data we know how to move. */
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		blk_start_request(req);
		__rq_for_each_bio(bio, req)
			gen_xfer_bio(dev, bio);
		__blk_end_request_all(req, 0);	/* complete the whole request */
	}
}
/* --- module-init template (error handling omitted for brevity) --- */

/* With xxx_major == 0 the kernel picks and returns a free major number. */
xxx_major = register_blkdev(xxx_major, "vmem_disk");
/* NOTE(review): this allocates NSECTORS device structs — presumably the
 * multiplier should be the number of devices, not sectors; verify. */
struct xxx_dev *dev = kzalloc(NSECTORS*sizeof(struct xxx_dev), GFP_KERNEL);
dev->size = NSECTORS*HARDSECT_SIZE;
/* Mode A: no request queue — bios go straight to xxx_make_request(). */
dev->queue = blk_alloc_queue(GFP_KERNEL);
blk_queue_make_request(dev->queue, xxx_make_request);
/* Mode B: classic request queue driven by a request_fn.
 * NOTE(review): this overwrites (and leaks) the queue allocated just
 * above — a real driver must pick exactly one of the two modes. */
dev->queue = blk_init_queue(xxx_request, &dev->lock);
blk_queue_logical_block_size(dev->queue, HARDSECT_SIZE);
dev->queue->queuedata = dev;	/* lets queue callbacks find our device */
/* Create the gendisk and make the disk visible to userspace. */
dev->gd = alloc_disk(xxx_MINORS);
dev->gd->major = xxx_major;
dev->gd->first_minor = 0;
dev->gd->fops = &xxx_ops;
dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
/* set_capacity() counts in 512-byte kernel sectors. */
set_capacity(dev->gd, NSECTORS*(HARDSECT_SIZE/KERNEL_SECTOR_SIZE));
add_disk(dev->gd);
用户每进行一次对硬盘的操作, 都会被操作系统处理成一个请求, 然后放入相应的请求队列中(该请求队列由驱动定义), 一个请求包含若干个bio, 一个bio又包含若干个bio_vec
bio_vec指向用户需要写入硬盘的数据, 它由如下三个参数组成:
/* One segment of a bio: a page plus the length/offset of the data in it. */
struct bio_vec {
	struct page *bv_page;   /* page holding this segment's data */
	unsigned int bv_len;    /* number of bytes in this segment */
	unsigned int bv_offset; /* byte offset of the data within the page */
};	/* fixed: the struct definition was missing its trailing semicolon */
一个bio还包含一个bvec_iter, 它由如下4个参数组成:
/* Iterator state used to walk a bio's segments (see bio_for_each_segment). */
struct bvec_iter {
	sector_t bi_sector;        /* current device sector (512-byte units) */
	unsigned int bi_size;      /* residual I/O size in bytes */
	unsigned int bi_idx;       /* index of the current bio_vec */
	unsigned int bi_bvec_done; /* bytes already done in the current bio_vec */
};
通过bio, 再结合其中的bvec_iter就可以找到当前的bio_vec.
用户可能发出若干对硬盘的操作, 也就对应着若干个bio, 操作系统能够按照一定的算法将这些操作重新组合成一个请求, 硬盘执行这个请求就能够以最高的效率将数据读取/写入.
以上优化仅对机械硬盘有意义, 因为机械硬盘按照扇区顺序读写才能达到最高效率. 对于RAMDISK、ZRAM等可以随机访问的设备, 排序与合并请求没有收益, 因此这类驱动不需要请求队列, 可直接用make_request方式逐个处理bio.
网络设备驱动模板
static void xxx_rx (struct net_device *dev)
{
struct xxx_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int length;
length = get_rev_len(...);
skb = dev_alloc_skb(length + 2);
skb_reserve(skb, 2);
skb->dev = dev;
...
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
...
}
/*
 * Interrupt handler template: read the interrupt status register and
 * dispatch to the matching service routine.
 * NOTE(review): modern kernels expect handlers to return irqreturn_t
 * (IRQ_HANDLED/IRQ_NONE) rather than void — adjust for real use.
 */
static void xxx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	int status;	/* fixed: status was used without being declared */

	status = ior(...);
	switch (status) {
	case IRQ_RECEIVER_ENENT:
		xxx_rx(dev);	/* a packet has arrived: run the receive path */
		break;
	...
	}
}
/*
 * ndo_tx_timeout: called by the network watchdog when a transmit has not
 * completed within watchdog_timeo jiffies.  Typical action: stop the
 * queue, reset the transmitter hardware, then restart the queue.
 */
static void xxx_timeout (struct net_device *dev)
{
	netif_stop_queue(dev);
	...	/* reset/recover the transmitter hardware */
	netif_wake_queue(dev);	/* allow transmissions to resume */
}
static xxx_start_xmit (struct sk_buf *skb, struct net_device *dev)
{
int len;
char *data, shortpkt[ETH_ZLEN];
if (xxx_send_available()) {
data = skb->data;
len = skb->len;
if (len < ETH_ZLEN) {
memset(shortpkt, 0, ETH_ZLEN);
memcpy(shortpkt, skb->data, skb->len);
len = ETH_ZLEN;
data = shortpkt;
}
}
dev->trans_start = jiffies;
if (...) {
xxx_hw_tx(data, len, dev);
} else {
netif_stop_queue(dev);
...
}
}
/*
 * ndo_open: called when the interface is brought up (e.g. `ifconfig up`).
 * Requests the device IRQ and starts the transmit queue.
 *
 * Fixes vs. the original template: `ret` was used without being declared,
 * the request_irq() result was ignored, and the int-returning function
 * never returned a value.
 */
static int xxx_open(struct net_device *dev)
{
	int ret;

	...
	ret = request_irq(dev->irq, &xxx_interrupt, 0, dev->name, dev);
	if (ret)
		return ret;	/* could not claim the interrupt line */
	...
	netif_start_queue(dev);	/* allow the stack to call start_xmit */
	...
	return 0;
}
/* net_device callbacks, hooked up to ndev->netdev_ops in the init code. */
static const struct net_device_ops xxx_netdev_ops = {
	.ndo_open = xxx_open,
	.ndo_stop = xxx_stop,
	.ndo_start_xmit = xxx_start_xmit,
	.ndo_tx_timeout = xxx_timeout,
	.ndo_do_ioctl = xxx_ioctl,
	...
};	/* fixed: the initializer was missing its trailing semicolon */
/* --- probe/init template --- */
struct net_device *ndev;
struct xxx_priv *priv;

/* Allocate a net_device plus private area, preconfigured for Ethernet. */
ndev = alloc_etherdev(sizeof(struct xxx_priv));
xxx_hw_init();
ndev->netdev_ops = &xxx_netdev_ops;
ndev->ethtool_ops = &xxx_ethtool_ops;
ndev->watchdog_timeo = timeout;	/* tx-timeout threshold in jiffies */
priv = netdev_priv(ndev);	/* driver-private data lives after net_device */
...
register_netdev(ndev);

/* --- remove/exit template: undo the above in reverse order --- */
unregister_netdev(ndev);
free_netdev(ndev);
本文内容由网友自发贡献,版权归原作者所有,本站不承担相应法律责任。如您发现有涉嫌抄袭侵权的内容,请联系:hwhale#tublm.com(使用前将#替换为@)