Linux Kernel Memory Management: CMA Allocation and Release
Published: 2019-05-25


 
Allocating CMA memory (cma_alloc() in mm/cma.c):

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
    unsigned long mask, offset;
    unsigned long pfn = -1;
    unsigned long start = 0;
    unsigned long bitmap_maxno, bitmap_no, bitmap_count;
    struct page *page = NULL;
    int ret;

    if (!cma || !cma->count)
        return NULL;

    pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
             count, align);

    if (!count)
        return NULL;

    mask = cma_bitmap_aligned_mask(cma, align);
    offset = cma_bitmap_aligned_offset(cma, align);
    bitmap_maxno = cma_bitmap_maxno(cma);
    bitmap_count = cma_bitmap_pages_to_bits(cma, count);

    if (bitmap_count > bitmap_maxno)
        return NULL;

    for (;;) {
        mutex_lock(&cma->lock);
        bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
                bitmap_maxno, start, bitmap_count, mask,
                offset);
        if (bitmap_no >= bitmap_maxno) {
            mutex_unlock(&cma->lock);
            break;
        }
        bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
        /*
         * It's safe to drop the lock here. We've marked this region for
         * our exclusive use. If the migration fails we will take the
         * lock again and unmark it.
         */
        mutex_unlock(&cma->lock);

        pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
        mutex_lock(&cma_mutex);
        ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
        mutex_unlock(&cma_mutex);
        if (ret == 0) {
            page = pfn_to_page(pfn);
            break;
        }

        cma_clear_bitmap(cma, pfn, count);
        if (ret != -EBUSY)
            break;

        pr_debug("%s(): memory range at %p is busy, retrying\n",
                 __func__, pfn_to_page(pfn));
        /* try again with a bit different memory target */
        start = bitmap_no + mask + 1;
    }

    trace_cma_alloc(pfn, page, count, align);

    pr_debug("%s(): returned %p\n", __func__, page);
    return page;
}
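For reference, the bitmap helpers that cma_alloc() leans on convert between pages and bitmap bits (one bit covers 2^order_per_bit pages). The following is a sketch paraphrased from mm/cma.c of kernels of this era (~4.x); check your own tree for the exact definitions:

/*
 * Sketch of the mm/cma.c bitmap helpers used by cma_alloc() above,
 * paraphrased from ~4.x kernels.
 */
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
                                             unsigned int align_order)
{
    /* Alignments at or below the bitmap granularity need no mask. */
    if (align_order <= cma->order_per_bit)
        return 0;
    return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
                                              unsigned long pages)
{
    /* Round the page count up to a whole number of bitmap bits. */
    return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
                             unsigned int count)
{
    unsigned long bitmap_no, bitmap_count;

    bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
    bitmap_count = cma_bitmap_pages_to_bits(cma, count);

    /* The bitmap is the only shared state here, so cma->lock suffices. */
    mutex_lock(&cma->lock);
    bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
    mutex_unlock(&cma->lock);
}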
 
cma_alloc() delegates the actual page grabbing to alloc_contig_range() in mm/page_alloc.c:

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:       start PFN to allocate
 * @end:         one-past-the-last PFN to allocate
 * @migratetype: migratetype of the underlying pageblocks (either
 *               #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *               in range must have the same migratetype and it must
 *               be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned, however it's the caller's responsibility to guarantee that
 * we are the only thread that changes migrate type of pageblocks the
 * pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
                       unsigned migratetype)
{
    unsigned long outer_start, outer_end;
    unsigned int order;
    int ret = 0;

    struct compact_control cc = {
        .nr_migratepages = 0,
        .order = -1,
        .zone = page_zone(pfn_to_page(start)),
        .mode = MIGRATE_SYNC,
        .ignore_skip_hint = true,
    };
    INIT_LIST_HEAD(&cc.migratepages);

    /*
     * What we do here is we mark all pageblocks in range as
     * MIGRATE_ISOLATE.  Because pageblock and max order pages may
     * have different sizes, and due to the way page allocator
     * work, we align the range to biggest of the two pages so
     * that page allocator won't try to merge buddies from
     * different pageblocks and change MIGRATE_ISOLATE to some
     * other migration type.
     *
     * Once the pageblocks are marked as MIGRATE_ISOLATE, we
     * migrate the pages from an unaligned range (ie. pages that
     * we are interested in).  This will put all the pages in
     * range back to page allocator as MIGRATE_ISOLATE.
     *
     * When this is done, we take the pages in range from page
     * allocator removing them from the buddy system.  This way
     * page allocator will never consider using them.
     *
     * This lets us mark the pageblocks back as
     * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
     * aligned range but not in the unaligned, original range are
     * put back to page allocator so that buddy can use them.
     */

    ret = start_isolate_page_range(pfn_max_align_down(start),
                                   pfn_max_align_up(end), migratetype,
                                   false);
    if (ret)
        return ret;

    /*
     * In case of -EBUSY, we'd like to know which page causes problem.
     * So, just fall through. We will check it in test_pages_isolated().
     */
    ret = __alloc_contig_migrate_range(&cc, start, end);
    if (ret && ret != -EBUSY)
        goto done;

    /*
     * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
     * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
     * more, all pages in [start, end) are free in page allocator.
     * What we are going to do is to allocate all pages from
     * [start, end) (that is remove them from page allocator).
     *
     * The only problem is that pages at the beginning and at the
     * end of interesting range may be not aligned with pages that
     * page allocator holds, ie. they can be part of higher order
     * pages.  Because of this, we reserve the bigger range and
     * once this is done free the pages we are not interested in.
     *
     * We don't have to hold zone->lock here because the pages are
     * isolated thus they won't get removed from buddy.
     */

    lru_add_drain_all();
    drain_all_pages(cc.zone);

    order = 0;
    outer_start = start;
    while (!PageBuddy(pfn_to_page(outer_start))) {
        if (++order >= MAX_ORDER) {
            outer_start = start;
            break;
        }
        outer_start &= ~0UL << order;
    }

    if (outer_start != start) {
        order = page_order(pfn_to_page(outer_start));

        /*
         * outer_start page could be small order buddy page and
         * it doesn't include start page. Adjust outer_start
         * in this case to report failed page properly
         * on tracepoint in test_pages_isolated()
         */
        if (outer_start + (1UL << order) <= start)
            outer_start = start;
    }

    /* Make sure the range is really isolated. */
    if (test_pages_isolated(outer_start, end, false)) {
        pr_info("%s: [%lx, %lx) PFNs busy\n",
                __func__, outer_start, end);
        ret = -EBUSY;
        goto done;
    }

    /* Grab isolated pages from freelists. */
    outer_end = isolate_freepages_range(&cc, outer_start, end);
    if (!outer_end) {
        ret = -EBUSY;
        goto done;
    }

    /* Free head and tail (if any) */
    if (start != outer_start)
        free_contig_range(outer_start, start - outer_start);
    if (end != outer_end)
        free_contig_range(end, outer_end - end);

done:
    undo_isolate_page_range(pfn_max_align_down(start),
                            pfn_max_align_up(end), migratetype);
    return ret;
}
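The doc comment above notes that a successful allocation must eventually be returned through free_contig_range(). For reference, in kernels of this era it is little more than a loop over __free_page() (paraphrased sketch; verify against your own tree):

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
    unsigned int count = 0;

    for (; nr_pages--; pfn++) {
        struct page *page = pfn_to_page(pfn);

        /* Any page still holding an extra reference indicates a leak. */
        count += page_count(page) != 1;
        __free_page(page);
    }
    WARN(count != 0, "%d pages are still in use!\n", count);
}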
Releasing CMA memory (cma_release() in mm/cma.c):

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
    unsigned long pfn;

    if (!cma || !pages)
        return false;

    pr_debug("%s(page %p)\n", __func__, (void *)pages);

    pfn = page_to_pfn(pages);

    if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
        return false;

    VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

    free_contig_range(pfn, count);
    cma_clear_bitmap(cma, pfn, count);
    trace_cma_release(pfn, pages, count);

    return true;
}
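Drivers normally do not call cma_alloc()/cma_release() directly; they reach the per-device (or default) CMA area through the dma-contiguous wrappers. Below is a minimal sketch of how a driver on a ~4.x kernel might grab and return a contiguous buffer. The helper names grab_cma_buffer()/put_cma_buffer() and the dev/size parameters are hypothetical; only dma_alloc_from_contiguous()/dma_release_from_contiguous() are real kernel APIs of that era:

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: allocate a physically contiguous buffer from
 * the device's CMA area. dma_alloc_from_contiguous() ends up in
 * cma_alloc() shown above.
 */
static struct page *grab_cma_buffer(struct device *dev, size_t size)
{
    size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;

    /* align is a PAGE_SIZE order, the same meaning as cma_alloc()'s @align */
    return dma_alloc_from_contiguous(dev, count, get_order(size));
}

/*
 * Hypothetical helper: hand the buffer back; this ends up in
 * cma_release() shown above.
 */
static void put_cma_buffer(struct device *dev, struct page *page, size_t size)
{
    size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;

    if (page)
        dma_release_from_contiguous(dev, page, count);
}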
 

Reprinted from: http://jzhti.baihongyu.com/
