trace for __get_free_page


Reposted from: http://blog.chinaunix.net/space.php?uid=20286427&do=blog&id=116103

__get_free_page is a macro, defined in include/linux/gfp.h:

>>> include/linux/gfp.h

#define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask), 0)

>>> mm/page_alloc.c

unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *page;

        /*
         * __get_free_pages() returns a 32-bit address, which cannot represent
         * a highmem page
         */
        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

        page = alloc_pages(gfp_mask, order);
        if (!page)
                return 0;
        return (unsigned long) page_address(page);
}

__get_free_pages ---> alloc_pages

>>> include/linux/gfp.h

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

__get_free_pages ---> alloc_pages ---> alloc_pages_node

>>> include/linux/gfp.h

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
{
        /* Unknown node is current node */
        if (nid < 0)
                nid = numa_node_id();

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

__get_free_pages ---> alloc_pages ---> alloc_pages_node ---> __alloc_pages

>>> include/linux/gfp.h

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist)
{
        return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

__get_free_pages ---> alloc_pages ---> alloc_pages_node ---> __alloc_pages ---> __alloc_pages_nodemask

>>> mm/page_alloc.c

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zone *preferred_zone;
        struct page *page;
        int migratetype = allocflags_to_migratetype(gfp_mask);

        gfp_mask &= gfp_allowed_mask;

        lockdep_trace_alloc(gfp_mask);

        might_sleep_if(gfp_mask & __GFP_WAIT);

        if (should_fail_alloc_page(gfp_mask, order))
                return NULL;

        /*
         * Check the zones suitable for the gfp_mask contain at least one
         * valid zone. It's possible to have an empty zonelist as a result
         * of GFP_THISNODE and a memoryless node
         */
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;

        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
        if (!preferred_zone)
                return NULL;

        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
                        preferred_zone, migratetype);
        if (unlikely(!page))
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);

        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
}

>>> include/linux/gfp.h

gfp_zone() picks the zone type according to the gfp flags:

static inline enum zone_type gfp_zone(gfp_t flags)
{
        enum zone_type z;
        /*
         * #define __GFP_DMA       ((__force gfp_t)0x01u)
         * #define __GFP_HIGHMEM   ((__force gfp_t)0x02u)
         * #define __GFP_DMA32     ((__force gfp_t)0x04u)
         * #define __GFP_MOVABLE   ((__force gfp_t)0x08u)   Page is movable
         * #define GFP_ZONEMASK    (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
         */
        int bit = flags & GFP_ZONEMASK;

        z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
                                        ((1 << ZONES_SHIFT) - 1);

        if (__builtin_constant_p(bit))
                MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
        else {
#ifdef CONFIG_DEBUG_VM
                BUG_ON((GFP_ZONE_BAD >> bit) & 1);
#endif
        }
        return z;
}

>>> include/linux/mm.h

__get_free_pages ---> page_address ---> lowmem_page_address

/* here we can see that the return value is the kernel virtual address of the page */
static __always_inline void *lowmem_page_address(struct page *page)
{
        return __va(page_to_pfn(page) << PAGE_SHIFT);
}

__get_free_pages ---> page_address ---> lowmem_page_address ---> page_to_pfn

#define page_to_pfn __page_to_pfn
#define __page_to_pfn(page)     ((unsigned long)((page) - mem_map) + \
                                  ARCH_PFN_OFFSET)

>>> mm/memory.c

struct page *mem_map;   /* mem_map is the array of struct page describing every physical page in the system */
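
To tie the trace back to normal usage, here is a minimal sketch of a module that allocates one page through __get_free_page() and frees it with free_page(); the address it receives is the kernel virtual address computed by lowmem_page_address() above. The sketch is not part of the original post, and the module and function names (page_demo_init, page_demo_exit) are made up for illustration.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>

static unsigned long page_addr;          /* kernel virtual address of the page */

static int __init page_demo_init(void)
{
        /* order 0: __get_free_page(gfp) expands to __get_free_pages(gfp, 0) */
        page_addr = __get_free_page(GFP_KERNEL);
        if (!page_addr)
                return -ENOMEM;
        return 0;
}

static void __exit page_demo_exit(void)
{
        free_page(page_addr);            /* hand the page back to the buddy allocator */
}

module_init(page_demo_init);
module_exit(page_demo_exit);
MODULE_LICENSE("GPL");

For higher orders the matching pair is __get_free_pages(gfp_mask, order) and free_pages(addr, order). A highmem gfp mask must not be used on this path, which is exactly what the VM_BUG_ON() in __get_free_pages() above enforces.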

     

     

Below is another short reposted note on page_to_pfn() and pfn_to_page():

Understanding +/- of a constant on struct pointers (page_to_pfn and pfn_to_page)

// Adding or subtracting an integer to/from a pointer of a struct type moves it by that many structs.
//
// 1. Subtraction
// Subtracting two pointers of the same struct type gives the number of such structs that fit between
// the two addresses.
// Example: page_to_pfn() converts a page descriptor (page, of type mem_map_t) into the physical page
// frame number of the page it manages:
#define page_to_pfn(page)       (((page) - mem_map) + PHYS_PFN_OFFSET)
// (page) - mem_map is the number of mem_map_t entries from mem_map up to page, i.e. how many pages
// come before it.
//
// 2. Addition
// Adding a constant to a struct pointer, e.g. mem_map + 3, gives the address of mem_map advanced by
// three mem_map_t-sized elements.
// Example: pfn_to_page() converts a physical page frame number into the mem_map_t pointer that
// manages that page:
#define pfn_to_page(pfn)        ((mem_map + (pfn)) - PHYS_PFN_OFFSET)
// Rewritten as (mem_map + (pfn - PHYS_PFN_OFFSET)) it is easier to read:
// index = pfn - PHYS_PFN_OFFSET is the offset index into mem_map corresponding to the physical page
// frame number pfn.
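
To make the pointer arithmetic concrete, here is a small user-space sketch (not from the original article) that mimics the two macros with an ordinary array; fake_page, fake_mem_map and FAKE_PFN_OFFSET are made-up stand-ins for struct page, mem_map and PHYS_PFN_OFFSET.

#include <stdio.h>

struct fake_page { unsigned long flags; };       /* stand-in for struct page / mem_map_t */

#define FAKE_PFN_OFFSET 0x100UL                  /* stand-in for PHYS_PFN_OFFSET */

static struct fake_page fake_mem_map[16];        /* stand-in for the mem_map[] array */

static unsigned long fake_page_to_pfn(struct fake_page *page)
{
        /* pointer subtraction counts elements (structs), not bytes */
        return (unsigned long)(page - fake_mem_map) + FAKE_PFN_OFFSET;
}

static struct fake_page *fake_pfn_to_page(unsigned long pfn)
{
        /* pointer addition also moves by whole elements */
        return fake_mem_map + (pfn - FAKE_PFN_OFFSET);
}

int main(void)
{
        struct fake_page *page = &fake_mem_map[3];

        printf("pfn = 0x%lx\n", fake_page_to_pfn(page));                  /* prints 0x103 */
        printf("round trip ok: %d\n", fake_pfn_to_page(0x103UL) == page); /* prints 1 */
        return 0;
}

Element 3 of the array maps to frame number 0x103, and converting that number back lands on the same descriptor, which is exactly the relationship the kernel macros rely on.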

     

