CMA Contiguous Physical Memory User-Space Mapping --- (2)
Abstract:
Compared with the test program in the previous post, CMA Contiguous Physical Memory User-Space Mapping --- (1), the following features have been added:
1. Allocation and mapping are combined into a single IOCTL call, so several blocks can be allocated and mapped into user space one after another, which makes the interface much easier to use.
2. The driver keeps the allocated blocks in a linked list, which makes adding and removing blocks straightforward.
3. Memory release and unmapping have been added.
4. When the driver module is removed with rmmod, all remaining memory is freed.
Mapping flow:
1. User space passes the requested allocation size to the driver through the IOCTL interface.
2. Depending on whether the caller asked for a write-combining buffer, the driver allocates the physical memory with dma_alloc_writecombine() or dma_alloc_coherent().
3. The driver then calls vm_mmap() to find a free region in the user address space for the mapping.
   vm_mmap() can only be used on kernels newer than Linux 3.7; on older kernels sys_mmap can be used instead (see the sketch after this list).
For reference, the mmap call stack:
[ 409.762850] [<c00184c4>] (unwind_backtrace+0x0/0xf8) from [<bf000020>] (cmamem_mmap+0x20/0xd0 [cma_mem])
[ 409.774141] [<bf000020>] (cmamem_mmap+0x20/0xd0 [cma_mem]) from [<c0095ab8>] (mmap_region+0x310/0x540)
[ 409.774771] [<c0095ab8>] (mmap_region+0x310/0x540) from [<c0095f80>] (do_mmap_pgoff+0x298/0x330)
[ 409.784230] [<c0095f80>] (do_mmap_pgoff+0x298/0x330) from [<c00886d0>] (vm_mmap_pgoff+0x64/0x94)
[ 409.792291] [<c00886d0>] (vm_mmap_pgoff+0x64/0x94) from [<c00947a8>] (sys_mmap_pgoff+0x54/0xa8)
[ 409.800962] [<c00947a8>] (sys_mmap_pgoff+0x54/0xa8) from [<c0013940>] (ret_fast_syscall+0x0/0x30)
4. vm_mmap() ends up calling the driver's mmap file operation, where remap_pfn_range() maps the physical memory into the user address space.
5. The mapped user-space address, the kernel virtual address and the physical address are then stored in a node on the linked list.
6. On a delete request the driver looks the block up in the list, unmaps it, frees the memory and removes it from the list.
7. When the driver module is unloaded, all remaining memory is freed.
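The kernel-version dependence in step 3 can be handled with a compile-time guard. Below is a minimal, untested sketch: it uses the 3.7 cutoff quoted above and falls back to do_mmap_pgoff(), the helper that sys_mmap eventually reaches in the call stack shown below, taken under mmap_sem on older kernels. The helper name map_to_user() is made up for illustration, and whether these symbols are usable from a module on a given kernel still needs to be checked.

#include <linux/version.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>

/* Map "size" bytes of the given file into the current process and return the
 * user-space address (or an error value encoded in the unsigned long). */
static unsigned long map_to_user(struct file *file, unsigned long size)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
    /* newer kernels: vm_mmap() takes and releases mmap_sem itself */
    return vm_mmap(file, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
#else
    /* older kernels: call do_mmap_pgoff() with mmap_sem held */
    unsigned long addr;

    down_write(&current->mm->mmap_sem);
    addr = do_mmap_pgoff(file, 0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, 0);
    up_write(&current->mm->mmap_sem);
    return addr;
#endif
}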
Source code:
Driver:
cma_mem.c
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/mempolicy.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/err.h>

#include "cma_mem.h"

#define DEVICE_NAME "cma_mem"
#define MEM_DEBUG 1

enum cma_status {
    UNKNOW_STATUS = 0,
    HAVE_ALLOCED = 1,
    HAVE_MMAPED = 2,
};

struct cmamem_dev {
    unsigned int count;
    struct miscdevice dev;
    struct mutex cmamem_lock;
};

struct cmamem_block {
    char name[10];
    char is_use_buffer;
    char is_free;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
    struct list_head memqueue_list;
};

struct current_status {
    int status;
    int id_count;
    dma_addr_t phy_base;
};

static struct current_status cmamem_status;
static struct cmamem_dev cmamem_dev;
static struct cmamem_block *cmamem_block_head;
static int mem_block_count = 0;

static void dump_mem(struct cmamem_block *memory_block)
{
    printk("%s:CMA name:%s\n", __func__, memory_block->name);
    printk("%s:CMA id:%d\n", __func__, memory_block->id);
    printk("%s:Is usebuf:%d\n", __func__, memory_block->is_use_buffer);
    printk("%s:PHY Base:0x%08lx\n", __func__, memory_block->phy_base);
    printk("%s:KER Base:0x%08x\n", __func__, (unsigned int)(memory_block->kernel_base));
    printk("%s:USR Base:0x%08lx\n", __func__, memory_block->mem_base);
}

static long cmamem_alloc(struct file *file, unsigned long arg)
{
    struct cmamem_block *memory_block;
    struct mem_block cma_info_temp;
    int size;
    int ret;

    if ((ret = copy_from_user(&cma_info_temp, (void __user *)arg,
            sizeof(struct mem_block)))) {
        printk(KERN_ERR "cmamem_alloc:copy_from_user error:%d\n", ret);
        return -1;
    }

    if (cma_info_temp.name[0] == '\0') {
        printk(KERN_ERR "%s, no mem name set, please set one\n", __func__);
        return -1;
    }
    /* make sure the user-supplied name is NUL terminated */
    cma_info_temp.name[sizeof(cma_info_temp.name) - 1] = '\0';

    if (cma_info_temp.len) {
        size = PAGE_ALIGN(cma_info_temp.len);
        cma_info_temp.len = size;
#ifdef MEM_DEBUG
        printk(KERN_INFO "%s len:%lu, is_use_buffer:%d\n", __func__,
                cma_info_temp.len, cma_info_temp.is_use_buffer);
#endif
        /* allocate physically contiguous memory from the CMA area */
        if (cma_info_temp.is_use_buffer)
            cma_info_temp.kernel_base = dma_alloc_writecombine(NULL, size,
                    (dma_addr_t *)(&(cma_info_temp.phy_base)), GFP_KERNEL);
        else
            cma_info_temp.kernel_base = dma_alloc_coherent(NULL, size,
                    (dma_addr_t *)(&(cma_info_temp.phy_base)), GFP_KERNEL);
        if (!cma_info_temp.phy_base) {
            printk(KERN_ERR "dma alloc fail:%d!\n", __LINE__);
            return -ENOMEM;
        }

        cma_info_temp.id = ++mem_block_count;
        cmamem_status.phy_base = cma_info_temp.phy_base;
        cmamem_status.id_count = cma_info_temp.id;
        cmamem_status.status = HAVE_ALLOCED;

        /* map the block into the caller's address space; this ends up in cmamem_mmap() */
        cma_info_temp.mem_base = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
        if (IS_ERR_VALUE(cma_info_temp.mem_base)) {
            printk(KERN_ERR "vm_mmap fail:%d!\n", __LINE__);
            cma_info_temp.id = --mem_block_count;
            return -ENOMEM;
        }
        printk(KERN_INFO "cma_info_temp.mem_base:0x%lx\n", cma_info_temp.mem_base);
    } else {
        printk(KERN_ERR "cmamem_alloc: the len is NULL\n");
        return -1;
    }
    if (copy_to_user((void __user *)arg, (void *)(&cma_info_temp), sizeof(struct mem_block)))
        return -EFAULT;

    /* set up the memory block and put it on the list */
    memory_block = (struct cmamem_block *)kmalloc(sizeof(struct cmamem_block), GFP_KERNEL);
    if (memory_block == NULL) {
        printk(KERN_ERR "%s error line:%d\n", __func__, __LINE__);
        mem_block_count--;
        return -1;
    }
    memcpy(memory_block->name, cma_info_temp.name, sizeof(memory_block->name));
    memory_block->id = cma_info_temp.id;
    memory_block->is_free = 0;
    memory_block->is_use_buffer = cma_info_temp.is_use_buffer;
    memory_block->mem_base = cma_info_temp.mem_base;
    memory_block->kernel_base = cma_info_temp.kernel_base;
    memory_block->phy_base = cma_info_temp.phy_base;
    memory_block->len = cma_info_temp.len;
#ifdef MEM_DEBUG
    dump_mem(memory_block);
#endif
#ifdef CMA_TEST
    {
        int i;
        for (i = 0; i < 10; i++)
            ((char *)(cma_info_temp.kernel_base))[i] = (cma_info_temp.id * i);
    }
#endif
    /* add to the memory block queue */
    list_add_tail(&memory_block->memqueue_list, &cmamem_block_head->memqueue_list);

    return 0;
}

static int cmamem_free(struct file *file, unsigned long arg)
{
    struct cmamem_block *memory_block;
    struct mem_block cma_info_temp;
    int ret;

    if ((ret = copy_from_user(&cma_info_temp, (void __user *)arg,
            sizeof(struct mem_block)))) {
        printk(KERN_ERR "cmamem_free:copy_from_user error:%d\n", ret);
        return -1;
    }
    printk(KERN_INFO "will delete the mem name:%s\n", cma_info_temp.name);

    /* look the block up by name, unmap it, free it and drop it from the list */
    list_for_each_entry(memory_block, &cmamem_block_head->memqueue_list, memqueue_list) {
        if (!strcmp(cma_info_temp.name, memory_block->name)) {
            if (memory_block->is_free == 0) {
                printk(KERN_INFO "delete the mem id:%d, name:%s\n",
                        memory_block->id, memory_block->name);
                vm_munmap(memory_block->mem_base, memory_block->len);
                /* free with the counterpart of the allocation call */
                if (memory_block->is_use_buffer)
                    dma_free_writecombine(NULL, memory_block->len,
                            memory_block->kernel_base, memory_block->phy_base);
                else
                    dma_free_coherent(NULL, memory_block->len,
                            memory_block->kernel_base, memory_block->phy_base);
                memory_block->is_free = 1;
                list_del(&memory_block->memqueue_list);
                kfree(memory_block);
                break;
            }
        }
    }
    return 0;
}

static int cmamem_freeall(void)
{
    struct cmamem_block *memory_block, *next;

    printk(KERN_INFO "will delete all cma mem\n");
    /* use the _safe variant because entries are removed while walking the list */
    list_for_each_entry_safe(memory_block, next, &cmamem_block_head->memqueue_list, memqueue_list) {
        if (memory_block->id > 0 && memory_block->is_free == 0) {
            printk(KERN_INFO "delete the mem id:%d, name:%s\n",
                    memory_block->id, memory_block->name);
            /* free with the counterpart of the allocation call */
            if (memory_block->is_use_buffer)
                dma_free_writecombine(NULL, memory_block->len,
                        memory_block->kernel_base, memory_block->phy_base);
            else
                dma_free_coherent(NULL, memory_block->len,
                        memory_block->kernel_base, memory_block->phy_base);
            memory_block->is_free = 1;
            list_del(&memory_block->memqueue_list);
            kfree(memory_block);
        }
    }
    return 0;
}

static long cmamem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    int ret = 0;

    switch (cmd) {
    case CMEM_ALLOCATE:
    {
        printk(KERN_INFO "cmamem_ioctl:CMEM_ALLOCATE\n");
        mutex_lock(&cmamem_dev.cmamem_lock);
        ret = cmamem_alloc(file, arg);
        if (ret < 0)
            goto alloc_err;
        mutex_unlock(&cmamem_dev.cmamem_lock);
        break;
    }
    case CMEM_UNMAP:
    {
        printk(KERN_INFO "cmamem_ioctl:CMEM_UNMAP\n");
        mutex_lock(&cmamem_dev.cmamem_lock);
        ret = cmamem_free(file, arg);
        if (ret < 0)
            goto free_err;
        mutex_unlock(&cmamem_dev.cmamem_lock);
        break;
    }
    default:
    {
        printk(KERN_INFO "cma mem: unsupported command\n");
        break;
    }
    }
    return 0;

alloc_err:
    mutex_unlock(&cmamem_dev.cmamem_lock);
    printk(KERN_ERR "%s alloc error\n", __func__);
    return ret;
free_err:
    mutex_unlock(&cmamem_dev.cmamem_lock);
    printk(KERN_ERR "%s free error\n", __func__);
    return ret;
}

static int cmamem_mmap(struct file *filp, struct vm_area_struct *vma)
{
    unsigned long start = vma->vm_start;
    unsigned long size = vma->vm_end - vma->vm_start;
    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    unsigned long page, pos;

    if (cmamem_status.status != HAVE_ALLOCED) {
        printk(KERN_ERR "%s, you should allocate memory first\n", __func__);
        return -EINVAL;
    }

    /* remap the physical pages of the most recently allocated block */
    pos = (unsigned long)cmamem_status.phy_base + offset;
    page = pos >> PAGE_SHIFT;
    if (remap_pfn_range(vma, start, page, size, PAGE_SHARED))
        return -EAGAIN;

    vma->vm_flags &= ~VM_IO;
    vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
    cmamem_status.status = HAVE_MMAPED;
    return 0;
}

static struct file_operations dev_fops = {
    .owner = THIS_MODULE,
    .unlocked_ioctl = cmamem_ioctl,
    .mmap = cmamem_mmap,
};

static int __init cmamem_init(void)
{
    printk(KERN_INFO "%s\n", __func__);
    mutex_init(&cmamem_dev.cmamem_lock);
    cmamem_dev.count = 0;
    cmamem_dev.dev.name = DEVICE_NAME;
    cmamem_dev.dev.minor = MISC_DYNAMIC_MINOR;
    cmamem_dev.dev.fops = &dev_fops;

    /* the list head is a dummy block with id == -1 */
    cmamem_block_head = (struct cmamem_block *)kmalloc(sizeof(struct cmamem_block), GFP_KERNEL);
    if (cmamem_block_head == NULL)
        return -ENOMEM;
    cmamem_block_head->id = -1;
    mem_block_count = 0;
    INIT_LIST_HEAD(&cmamem_block_head->memqueue_list);
    /*
    cmamem_status.status = UNKNOW_STATUS;
    cmamem_status.id_count = -1;
    cmamem_status.phy_base = 0;
    */
    return misc_register(&cmamem_dev.dev);
}

static void __exit cmamem_exit(void)
{
    printk(KERN_INFO "%s\n", __func__);
    cmamem_freeall();
    misc_deregister(&cmamem_dev.dev);
    kfree(cmamem_block_head);
}

module_init(cmamem_init);
module_exit(cmamem_exit);
MODULE_LICENSE("GPL");
cma_mem.h
#ifndef _CMA_MEM_H_
#define _CMA_MEM_H_

#define CMEM_IOCTL_MAGIC 'm'
#define CMEM_GET_PHYS _IOW(CMEM_IOCTL_MAGIC, 1, unsigned int)
#define CMEM_MAP _IOW(CMEM_IOCTL_MAGIC, 2, unsigned int)
#define CMEM_GET_SIZE _IOW(CMEM_IOCTL_MAGIC, 3, unsigned int)
#define CMEM_UNMAP _IOW(CMEM_IOCTL_MAGIC, 4, unsigned int)
#define CMEM_ALLOCATE _IOW(CMEM_IOCTL_MAGIC, 5, unsigned int)
#define CMEM_CONNECT _IOW(CMEM_IOCTL_MAGIC, 6, unsigned int)
#define CMEM_GET_TOTAL_SIZE _IOW(CMEM_IOCTL_MAGIC, 7, unsigned int)
#define CMEM_CACHE_FLUSH _IOW(CMEM_IOCTL_MAGIC, 8, unsigned int)

struct mem_block {
    char name[10];
    char is_use_buffer;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
};

#endif
User-space test program:
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <sys/mman.h>
#include <assert.h>
#include <linux/videodev2.h>
#include <linux/fb.h>
#include <pthread.h>
#include <poll.h>
#include <semaphore.h>

#define CMEM_IOCTL_MAGIC 'm'
#define CMEM_GET_PHYS _IOW(CMEM_IOCTL_MAGIC, 1, unsigned int)
#define CMEM_MAP _IOW(CMEM_IOCTL_MAGIC, 2, unsigned int)
#define CMEM_GET_SIZE _IOW(CMEM_IOCTL_MAGIC, 3, unsigned int)
#define CMEM_UNMAP _IOW(CMEM_IOCTL_MAGIC, 4, unsigned int)
#define CMEM_ALLOCATE _IOW(CMEM_IOCTL_MAGIC, 5, unsigned int)
#define CMEM_CONNECT _IOW(CMEM_IOCTL_MAGIC, 6, unsigned int)
#define CMEM_GET_TOTAL_SIZE _IOW(CMEM_IOCTL_MAGIC, 7, unsigned int)
#define CMEM_CACHE_FLUSH _IOW(CMEM_IOCTL_MAGIC, 8, unsigned int)

struct cmamem_info {
    char name[10];
    char is_use_buffer;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
};

struct mem_block {
    char name[10];
    char is_use_buffer;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
};

int main()
{
    int cmem_fd;
    void *cmem_base;
    unsigned int size;
    struct mem_block region;
    int i, j;
    char str[10];

    memset(&region, 0x00, sizeof(struct mem_block));
    /* open the device; the buffers are meant to be fed to a hardware engine, so they must be non-cached */
    cmem_fd = open("/dev/cma_mem", O_RDWR, 0);
    printf("cmem_fd:%d\n", cmem_fd);
    j = 0;
    if (cmem_fd >= 0)
        while (j <= 2)
        {
            j++;
            sprintf(str, "mem%d", j);
            memset(&region, 0x00, sizeof(struct mem_block));
            region.len = 800 * 480 * 4;
            region.is_use_buffer = 1;
            memcpy(region.name, str, strlen(str));
            printf("sizeof(struct mem_block):%zu\n", sizeof(struct mem_block));
            printf("region.mem_base:0x%08lx\n", region.mem_base);
            /* allocate one block and map it into this process in a single ioctl */
            if (ioctl(cmem_fd, CMEM_ALLOCATE, &region) < 0)
            {
                perror("CMEM_ALLOCATE failed\n");
                return -1;
            }
            //size = region.len;
            printf("region.len:0x%08lx offset:0x%08lx\n", region.len, region.offset);
            printf("region.mem_base:0x%08lx\n", region.mem_base);
            for (i = 0; i < 10; i++)
                printf("%d\n", ((char *)(region.mem_base))[i]);
            /*
            cmem_base = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, cmem_fd, 0); // map by hand
            if (cmem_base == MAP_FAILED)
            {
                cmem_base = 0;
                close(cmem_fd);
                cmem_fd = -1;
                perror("mmap pmem error!\n");
            }
            for (i = 0; i < 10; i++)
                ((unsigned int *)cmem_base)[i] = i;
            printf("pmem_base:0x%08x\n", cmem_base);
            for (i = 0; i < 10; i++)
                printf("%d\n", ((unsigned int *)cmem_base)[i]);
            */
            printf("\n\n ********************* \n\n");
        }
    printf("free the mem\n");
    getchar();
    j = 0;
    /* free test
    while (j <= 2)
    {
        j++;
        sprintf(str, "mem%d", j);
        memset(&region, 0x00, sizeof(struct mem_block));
        region.id = j;
        region.is_use_buffer = 1;
        memcpy(region.name, str, strlen(str));
        printf("user will del:%s, id = %d\n", str, region.id);
        if (ioctl(cmem_fd, CMEM_UNMAP, &region) < 0)
        {
            perror("CMEM_UNMAP failed\n");
            return -1;
        }
    }
    getchar();
    */
    close(cmem_fd);
    return 0;
}
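The free path above is commented out in the test program. For completeness, here is a minimal, untested sketch of driving CMEM_UNMAP on its own; it reuses struct mem_block and the ioctl number from cma_mem.h above, and assumes blocks named mem1..mem3 were previously created with CMEM_ALLOCATE (the file name free_cma_blocks.c is made up for illustration):

/* free_cma_blocks.c - sketch only; block names and count are assumptions */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define CMEM_IOCTL_MAGIC 'm'
#define CMEM_UNMAP _IOW(CMEM_IOCTL_MAGIC, 4, unsigned int)

struct mem_block {
    char name[10];
    char is_use_buffer;
    int id;
    unsigned long offset;
    unsigned long len;
    unsigned long phy_base;
    unsigned long mem_base;
    void *kernel_base;
};

int main(void)
{
    struct mem_block region;
    int j;
    int fd = open("/dev/cma_mem", O_RDWR);

    if (fd < 0) {
        perror("open /dev/cma_mem");
        return -1;
    }
    for (j = 1; j <= 3; j++) {
        memset(&region, 0x00, sizeof(region));
        snprintf(region.name, sizeof(region.name), "mem%d", j);
        /* the driver looks the block up by name, unmaps it and frees it */
        if (ioctl(fd, CMEM_UNMAP, &region) < 0)
            perror("CMEM_UNMAP");
    }
    close(fd);
    return 0;
}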