/*
 *    linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

static int anon_map(struct inode *, struct file *,
            unsigned long, size_t, int,
            unsigned long);
/*
 * description of effects of mapping type and prot in current implementation.
 * this is due to the current handling of page faults in memory.c. the expected
 * behavior is in parens:
 *
 * map_type    prot
 *        PROT_NONE    PROT_READ    PROT_WRITE    PROT_EXEC
 * MAP_SHARED    r: (no) yes    r: (yes) yes    r: (no) yes    r: (no) no
 *        w: (no) yes    w: (no) copy    w: (yes) yes    w: (no) no
 *        x: (no) no    x: (no) no    x: (no) no    x: (yes) no
 *        
 * MAP_PRIVATE    r: (no) yes    r: (yes) yes    r: (no) yes    r: (no) no
 *        w: (no) copy    w: (no) copy    w: (copy) copy    w: (no) no
 *        x: (no) no    x: (no) no    x: (no) no    x: (yes) no
 *
 */
/* True when the page-aligned address still falls inside the current
 * process's code region.
 * NOTE(review): this computes start_code + end_code, which assumes
 * end_code holds a length relative to start_code -- confirm against
 * the task_struct definition before relying on it. */
#define CODE_SPACE(addr)    \
 (PAGE_ALIGN(addr) < current->start_code + current->end_code)

/*
 * do_mmap() - map a file (or anonymous memory) into the current
 * process's address space.
 *
 * file:  backing file, or NULL for an anonymous mapping
 * addr:  requested address (only honoured with MAP_FIXED; otherwise a
 *        free slot in the SHM range is chosen)
 * len:   length of the mapping (page-aligned here)
 * prot:  PROT_* protection bits
 * flags: MAP_* flags (mapping type, MAP_FIXED, ...)
 * off:   offset into the file
 *
 * Returns the mapped address on success, a negative errno for argument
 * errors, or -1 with current->errno set when the low-level mapper fails.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
    unsigned long prot, unsigned long flags, unsigned long off)
{
    int mask, error;

/* a zero-length request trivially "succeeds" and returns addr */
    if ((len = PAGE_ALIGN(len)) == 0)
        return addr;
    
    /* the whole range must fit inside the user address space */
    if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
        return -EINVAL;

/*
     * do simple checking here so the lower-level routines won't have
     * to. we assume access permissions have been handled by the open
     * of the memory object, so we don't do any here.
     */
    /* file-backed mapping: check the file's open mode against the
       requested protection (mode bit 1 = read, bit 2 = write) */
    if (file != NULL)
        switch (flags & MAP_TYPE) {
        case MAP_SHARED:
            if ((prot & PROT_WRITE) && !(file->f_mode & 2))
                return -EACCES;
            /* fall through */
        case MAP_PRIVATE:
            if (!(file->f_mode & 1))
                return -EACCES;
            break;

default:
            return -EINVAL;
        }
    /*
     * obtain the address to map to. we verify (or select) it and ensure
     * that it represents a valid section of the address space.
     */
    /* MAP_FIXED: use addr exactly (must be page-aligned); otherwise
       scan the SHM range for a gap not covered by any existing area */
    if (flags & MAP_FIXED) {
        if (addr & ~PAGE_MASK)
            return -EINVAL;
        if (len > TASK_SIZE || addr > TASK_SIZE - len)
            return -EINVAL;
    } else {
        struct vm_area_struct * vmm;

/* Maybe this works.. Ugly it is. */
        addr = SHM_RANGE_START;
        while (addr+len < SHM_RANGE_END) {
            for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
                if (addr >= vmm->vm_end)
                    continue;
                if (addr + len <= vmm->vm_start)
                    continue;
                /* overlap: restart the scan past this area */
                addr = PAGE_ALIGN(vmm->vm_end);
                break;
            }
            if (!vmm)
                break;    /* walked the whole list without overlap */
        }
        if (addr+len >= SHM_RANGE_END)
            return -ENOMEM;
    }

/*
     * determine the object being mapped and call the appropriate
     * specific mapper. the address has already been validated, but
     * not unmapped, but the maps are removed from the list.
     */
    if (file && (!file->f_op || !file->f_op->mmap))
        return -ENODEV;
    /* translate PROT_* bits into page-table protection bits; a private
       writable mapping gets copy-on-write protection */
    mask = 0;
    if (prot & (PROT_READ | PROT_EXEC))
        mask |= PAGE_READONLY;
    if (prot & PROT_WRITE)
        if ((flags & MAP_TYPE) == MAP_PRIVATE)
            mask |= PAGE_COPY;
        else
            mask |= PAGE_SHARED;
    if (!mask)
        return -EINVAL;

do_munmap(addr, len);    /* Clear old maps */

if (file)
        error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
    else
        error = anon_map(NULL, NULL, addr, len, mask, off);
    
    if (!error)
        return addr;

/* mapper failed: stash the errno and return -1 (note the unusual
   error convention on this path) */
if (!current->errno)
        current->errno = -error;
    return -1;
}

/*
 * sys_mmap() - mmap system call entry point.  The six user arguments
 * (addr, len, prot, flags, fd, off) arrive packed as an array of
 * longs in user space; unpack them and hand off to do_mmap().
 */
asmlinkage int sys_mmap(unsigned long *buffer)
{
    int error;
    unsigned long flags;
    unsigned long fd;
    struct file * file;

    /* make sure all six argument slots are readable */
    error = verify_area(VERIFY_READ, buffer, 6*4);
    if (error)
        return error;

    file = NULL;
    flags = get_fs_long(buffer+3);
    if (!(flags & MAP_ANONYMOUS)) {
        /* a file-backed mapping needs a valid open descriptor */
        fd = get_fs_long(buffer+4);
        if (fd >= NR_OPEN)
            return -EBADF;
        file = current->filp[fd];
        if (!file)
            return -EBADF;
    }

    return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
        get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
}

/*
 * Normal function to fix up a mapping
 * This function is the default for when an area has no specific
 * function.  This may be used as part of a more specific routine.
 * This function works out what part of an area is affected and
 * adjusts the mapping information.  Since the actual page
 * manipulation is done in do_mmap(), none need be done here,
 * though it would probably be more appropriate.
 *
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The 4 main cases are:
 *    Unmapping the whole area
 *    Unmapping from the start of the segment to a point in it
 *    Unmapping from an intermediate point to the end
 *    Unmapping between to intermediate points, making a hole.
 *
 * Case 4 involves the creation of 2 new areas, for each side of
 * the hole.
 */
 //
/*
 * Adjust the bookkeeping for 'area' after [addr, addr+len) has been
 * unmapped from it.  By the time we get here the area has already been
 * removed from the process list (see do_munmap), so whatever survives
 * must be re-inserted.  Four cases: whole area, head, tail, and a hole
 * in the middle (which creates an extra area for the far side).
 */
void unmap_fixup(struct vm_area_struct *area,
         unsigned long addr, size_t len)
{
    struct vm_area_struct *mpnt;
    unsigned long end = addr + len;

    /* The range to remove must lie entirely within the area. */
    if (addr < area->vm_start || addr >= area->vm_end ||
        end <= area->vm_start || end > area->vm_end ||
        end < addr)
    {
        printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
               area->vm_start, area->vm_end, addr, end);
        return;
    }

    /* Unmapping the whole area: nothing survives, nothing is
     * re-inserted; just let the backing object clean up. */
    if (addr == area->vm_start && end == area->vm_end) {
        if (area->vm_ops && area->vm_ops->close)
            area->vm_ops->close(area);
        return;
    }

    /* Trim from the tail... */
    if (addr >= area->vm_start && end == area->vm_end)
        area->vm_end = addr;
    /* ...or from the head (the file offset advances with the start). */
    if (addr == area->vm_start && end <= area->vm_end) {
        area->vm_offset += (end - area->vm_start);
        area->vm_start = end;
    }

    /* Punching a hole: clone the area for the tail half, then truncate
     * the original to the head half (re-inserted below). */
    if (addr > area->vm_start && end < area->vm_end)
    {
        mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
        if (!mpnt)
            return;    /* FIX: original dereferenced a NULL kmalloc result */

        *mpnt = *area;
        mpnt->vm_offset += (end - area->vm_start);
        mpnt->vm_start = end;
        if (mpnt->vm_inode)
            mpnt->vm_inode->i_count++;    /* the clone holds its own ref */
        insert_vm_struct(current, mpnt);
        area->vm_end = addr;    /* Truncate area */
    }

    /* Re-insert a copy of what is left of the (already removed) area. */
    mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
    if (!mpnt)
        return;    /* FIX: original dereferenced a NULL kmalloc result */
    *mpnt = *area;
    insert_vm_struct(current, mpnt);
}

/*
 * sys_mprotect() - mprotect system call entry point.
 * Changing the protection of an existing mapping is not supported
 * yet, so every request is rejected.
 */
asmlinkage int sys_mprotect(unsigned long addr, size_t len, unsigned long prot)
{
    return -EINVAL; /* Not implemented yet */
}

/*
 * sys_munmap() - munmap system call entry point.
 * Thin wrapper: all validation and list surgery happens in do_munmap().
 */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
    return do_munmap(addr, len);
}

/*
 * Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardine <jeremy@sw.oz.au>
 */
 // worker that performs the actual unmapping
/*
 * Worker for munmap(): collect every vm area touched by
 * [addr, addr+len) onto a private list, let each area fix itself up
 * (handling partial unmaps), then tear down the page tables for the
 * whole range.  Returns 0 on success or a negative errno.
 */
int do_munmap(unsigned long addr, size_t len)
{
    struct vm_area_struct *mpnt, **npp, *free;

/* addr must be page-aligned and the range must fit in user space */
if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
        return -EINVAL;

/* a zero-length unmap is a no-op */
if ((len = PAGE_ALIGN(len)) == 0)
        return 0;

/*
     * Check if this memory area is ok - put it on the temporary
     * list if so..  The checks here are pretty simple --
     * every area affected in some way (by any overlap) is put
     * on the list.  If nothing is put on, nothing is affected.
     */
    npp = &current->mmap;
    free = NULL;
    for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
        unsigned long end = addr+len;

/* no overlap with this area: advance, leaving it on the list */
if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
            (addr >= mpnt->vm_end && end > mpnt->vm_end))
        {
            npp = &mpnt->vm_next;
            continue;
        }

/* overlap: unlink the area and push it onto the 'free' list
   (note: *npp is not advanced, so the next iteration re-examines
   the successor that just slid into this slot) */
*npp = mpnt->vm_next;
        mpnt->vm_next = free;
        free = mpnt;
    }

/* nothing overlapped: nothing to do */
if (free == NULL)
        return 0;

/*
     * Ok - we have the memory areas we should free on the 'free' list,
     * so release them, and unmap the page range..
     * If the one of the segments is only being partially unmapped,
     * it will put new vm_area_struct(s) into the address space.
     */
    while (free) {
        unsigned long st, end;

mpnt = free;
        free = free->vm_next;

/* clamp the unmap range to this area's bounds */
st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
        end = addr+len;
        end = end > mpnt->vm_end ? mpnt->vm_end : end;

/* let the area's own unmap handler run if it has one, else use
   the generic fixup (which re-inserts any surviving pieces) */
if (mpnt->vm_ops && mpnt->vm_ops->unmap)
            mpnt->vm_ops->unmap(mpnt, st, end-st);
        else
            unmap_fixup(mpnt, st, end-st);

kfree(mpnt);
    }

/* finally drop the page-table entries for the whole range */
unmap_page_range(addr, len);
    return 0;
}

/*
 * generic_mmap() - default mmap implementation for regular files on
 * block-based filesystems.  Only read-only / copy-on-write mappings
 * are supported; writable shared mappings are rejected.  On success a
 * vm area backed by 'inode' is inserted into the current process and
 * 0 is returned; otherwise a negative errno.
 */
int generic_mmap(struct inode * inode, struct file * file,
    unsigned long addr, size_t len, int prot, unsigned long off)
{
    struct vm_area_struct * mpnt;
    extern struct vm_operations_struct file_mmap;
    struct buffer_head * bh;

    if (prot & PAGE_RW)    /* only PAGE_COW or read-only supported right now */
        return -EINVAL;
    /* FIX: validate i_sb before dereferencing it -- the original read
     * inode->i_sb->s_blocksize first and only then checked i_sb for
     * NULL, a potential NULL-pointer dereference. */
    if (!inode->i_sb || !S_ISREG(inode->i_mode))
        return -EACCES;
    /* the file offset must be aligned to the filesystem block size */
    if (off & (inode->i_sb->s_blocksize - 1))
        return -EINVAL;
    if (!inode->i_op || !inode->i_op->bmap)
        return -ENOEXEC;
    /* probe the first block to make sure the file is readable */
    if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
        return -EACCES;
    if (!IS_RDONLY(inode)) {
        inode->i_atime = CURRENT_TIME;
        inode->i_dirt = 1;
    }
    brelse(bh);

    mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
    if (!mpnt)
        return -ENOMEM;

    unmap_page_range(addr, len);    /* clear any previous mapping here */
    mpnt->vm_task = current;
    mpnt->vm_start = addr;
    mpnt->vm_end = addr + len;
    mpnt->vm_page_prot = prot;
    mpnt->vm_share = NULL;
    mpnt->vm_inode = inode;
    inode->i_count++;        /* the mapping holds a reference */
    mpnt->vm_offset = off;
    mpnt->vm_ops = &file_mmap;
    insert_vm_struct(current, mpnt);
    merge_segments(current->mmap, NULL, NULL);

    return 0;
}

/*
 * Insert vm structure into process list
 * This makes sure the list is sorted by start address, and
 * some some simple overlap checking.
 * JSGF
 */
 // insert a vm_area_struct into the process's sorted mmap list
/*
 * Insert 'vmp' into task 't''s mmap list, keeping the list sorted by
 * vm_start.  Overlap with an existing area is reported via printk
 * (the insert still proceeds, matching the original design).
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
    struct vm_area_struct **nxtpp, *mpnt;

    nxtpp = &t->mmap;

    for(mpnt = t->mmap; mpnt != NULL; mpnt = mpnt->vm_next)
    {
        /* stop at the first area that starts beyond the new one */
        if (mpnt->vm_start > vmp->vm_start)
            break;
        nxtpp = &mpnt->vm_next;

        if ((vmp->vm_start >= mpnt->vm_start &&
             vmp->vm_start < mpnt->vm_end) ||
            (vmp->vm_end >= mpnt->vm_start &&
             vmp->vm_end < mpnt->vm_end))
            /* FIX: the last argument was vmp->vm_end, so the existing
             * area's end was printed wrongly in the diagnostic */
            printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
                   vmp->vm_start, vmp->vm_end,
                   mpnt->vm_start, mpnt->vm_end);
    }

    /* splice the new area in front of 'mpnt' */
    vmp->vm_next = mpnt;

    *nxtpp = vmp;
}

/*
 * Merge a list of memory segments if possible.
 * Redundant vm_area_structs are freed.
 * This assumes that the list is ordered by address.
 */
 // merge adjacent compatible memory segments
/*
 * Walk the (address-sorted) area list and merge each pair of adjacent
 * areas that are compatible.  'mergep' is an optional predicate that
 * decides whether two areas' offsets allow merging (NULL selects the
 * default "file offsets are contiguous" rule); 'mpd' is its opaque
 * argument.  Redundant vm_area_structs are freed.
 */
void merge_segments(struct vm_area_struct *mpnt,
            map_mergep_fnp mergep, void *mpd)
{
    struct vm_area_struct *prev, *next;

/* empty list: nothing to merge */
if (mpnt == NULL)
        return;
    
    /* iterate over adjacent pairs (prev, mpnt); 'next' is captured up
       front because mpnt may be freed when a merge succeeds */
    for(prev = mpnt, mpnt = mpnt->vm_next;
        mpnt != NULL;
        prev = mpnt, mpnt = next)
    {
        int mp;

next = mpnt->vm_next;
        
        /* offset compatibility: default rule or caller's predicate */
        if (mergep == NULL)
        {
            unsigned long psz = prev->vm_end - prev->vm_start;
            mp = prev->vm_offset + psz == mpnt->vm_offset;
        }
        else
            mp = (*mergep)(prev, mpnt, mpd);

/*
         * Check the two areas are compatible: same ops, protection,
         * inode and share pointer, virtually adjacent, offsets mergeable,
         * and actually consecutive on the list.
         */
        if (prev->vm_ops != mpnt->vm_ops ||
            prev->vm_page_prot != mpnt->vm_page_prot ||
            prev->vm_inode != mpnt->vm_inode ||
            prev->vm_end != mpnt->vm_start ||
            !mp ||
            prev->vm_share != mpnt->vm_share ||        /* ?? */
            prev->vm_next != mpnt)            /* !!! */
            continue;

/*
         * merge prev with mpnt and set up pointers so the new
         * big segment can possibly merge with the next one.
         * The old unused mpnt is freed.
         */
        prev->vm_end = mpnt->vm_end;
        prev->vm_next = mpnt->vm_next;
        kfree_s(mpnt, sizeof(*mpnt));
        mpnt = prev;
    }
}

/*
 * Map memory not associated with any file into a process
 * address space.  Adjecent memory is merged.
 */
 // map anonymous (file-less) memory into the process address space
/*
 * anon_map() - back [addr, addr+len) with zero-filled pages and record
 * the region as a vm area of the current process.  'ino', 'file' and
 * 'off' exist only to match the mapper signature; anonymous memory has
 * no inode, no offset and no special operations.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int anon_map(struct inode *ino, struct file * file,
            unsigned long addr, size_t len, int mask,
            unsigned long off)
{
    struct vm_area_struct *area;

    /* populate the page tables with zero pages first */
    if (zeromap_page_range(addr, len, mask))
        return -ENOMEM;

    area = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
    if (area == NULL)
        return -ENOMEM;

    /* describe the new anonymous region */
    area->vm_task = current;
    area->vm_start = addr;
    area->vm_end = addr + len;
    area->vm_page_prot = mask;
    area->vm_share = NULL;
    area->vm_inode = NULL;
    area->vm_offset = 0;
    area->vm_ops = NULL;

    insert_vm_struct(current, area);
    merge_segments(current->mmap, ignoff_mergep, NULL);

    return 0;
}

/* Merge, ignoring offsets */
//合并,忽略偏移
int ignoff_mergep(const struct vm_area_struct *m1,
          const struct vm_area_struct *m2,
          void *data)
{
    //如果虚拟内存结构的虚拟内存节点不相同,则退出
    if (m1->vm_inode != m2->vm_inode)    /* Just to be sure */
        return 0;
    
    //没看懂!!!!
    return (struct inode *)data == m1->vm_inode;
}

mm/mmap.c的更多相关文章

  1. Linux内存分配小结--malloc、brk、mmap【转】

    转自:https://blog.csdn.net/gfgdsg/article/details/42709943 http://blog.163.com/xychenbaihu@yeah/blog/s ...

  2. 内存控制函数(1)-mmap() 建立内存映射

    示例1: 1.首先建立一个文本文件,名字为tmp,内容为hello world 2.编写mmap.c #include <sys/types.h> #include <sys/sta ...

  3. Linux中brk()系统调用,sbrk(),mmap(),malloc(),calloc()的异同【转】

    转自:http://blog.csdn.net/kobbee9/article/details/7397010 brk和sbrk主要的工作是实现虚拟内存到内存的映射.在GNUC中,内存分配是这样的:  ...

  4. mmap vs read

    先放个结论: 内存映射通常比随机访问更快,尤其访问的对象是分离的和不可预测的. 内存映射会持续占用pages, 直到完成访问. 这意味当长时间重度使用一个文件很久之前, 然后你关闭了它, 然后再重新打 ...

  5. Linux进程通信之mmap

    mmap()函数: void *mmap(void* addr,size_t length,int port,int flags,int fd,off_t offset); 返回:成功:返回创建的映射 ...

  6. PROC 文件系统调节参数介绍(netstat -us)

    转自:http://www.cnblogs.com/super-king/p/3296333.html /proc/net/* snmp文件 Ip: ip项 Forwarding        : 是 ...

  7. LInux_System_Call_INT_80h

    Int 80h Website (Copy from Linux-System-Call) List of Linux/i386 system calls Copyright (C) 1999-200 ...

  8. Linux Process Virtual Memory

    目录 . 简介 . 进程虚拟地址空间 . 内存映射的原理 . 数据结构 . 对区域的操作 . 地址空间 . 内存映射 . 反向映射 .堆的管理 . 缺页异常的处理 . 用户空间缺页异常的校正 . 内核 ...

  9. Linux内存管理基本概念

    1. 基本概念 1.1 地址 (1)逻辑地址:指由程序产生的与段相关的偏移地址部分.在C语言指针中,读取指针变量本身值(&操作),实际上这个值就是逻辑地址,它是相对于你当前进程数据段的地址.( ...

随机推荐

  1. (二)Kafka动态增加Topic的副本(Replication)

    (二)Kafka动态增加Topic的副本(Replication) 1. 查看topic的原来的副本分布 [hadoop@sdf-nimbus-perf ~]$ le-kafka-topics.sh ...

  2. 《BI那点儿事》数据流转换——条件性拆分

    根据条件分割数据是一个在数据流中添加复杂逻辑的方法,它允许根据条件将数据输出到其他不同的路径中.例如,可以将TotalSugar< 27.4406的输出到一个路径,TotalSugar > ...

  3. spring+mybatis

    ---恢复内容开始--- 使用SSM(Spring.SpringMVC和Mybatis)已经有三个多月了,项目在技术上已经没有什么难点了,基于现有的技术就可以实现想要的功能,当然肯定有很多可以改进的地 ...

  4. checkbox 设置不可更改

    readonly="readonly" 设置不起作用   用 onclick="return false;"

  5. linux驱动初探之杂项设备(控制两个GPIO口)

    关键字:linux驱动.杂项设备.GPIO 此驱动程序控制了外接的两个二极管,二极管是低电平有效. 上一篇博客中已经介绍了linux驱动程序的编写流程,这篇博客算是前一篇的提高篇,也是下一篇博客(JN ...

  6. Linux编程之驱动

    增加自己写的驱动程序:http://blog.chinaunix.net/uid-23065002-id-115739.html http://os.51cto.com/art/201108/2840 ...

  7. WPF仿Word头部格式,涉及DEV RibbonControl,NarvbarControl,ContentPresenter,Navigation

    时隔1个月,2015/06/17走进新的环境. 最近一个星期在学习仿Word菜单栏的WPF实现方式,废话不多说,先看一下效果. 打开界面后,默认选中[市场A],A对应的菜单栏,如上图, 选择[市场B] ...

  8. linux注销、关机、重启

    一.Logout 注销是登陆的相对操作,登陆系统后,若要离开系统,用户只要直接下达logout命令即可:[root@laolinux root]#logoutRed Hat Linux release ...

  9. 51nod 1445 变色DNA(dij)

    题目链接:51nod 1445 变色DNA 看了相关讨论再去用最短路:val[i][j]之间如果是'Y',说明i可以到达j,并且i到达j的代价是i那行 1到j-1 里面'Y'的数量. 最后,求 0到n ...

  10. WCF事务应用[转]

    在B2B的项目中,一般用户注册后,就有一个属于自己的店铺,此时,我们就要插入两张表, User和Shop表. 当然,要么插入成功,要么全失败. 第一步: 首先看一下项目的结构图: 第二步: 准备工作, ...