Skip to content

Commit

Permalink
mm/page_alloc.c: replace set_dma_reserve with set_memory_reserve
Browse files Browse the repository at this point in the history
Expand the scope of the existing dma_reserve to accommodate other memory
reserves too.  Accordingly rename variable dma_reserve to
nr_memory_reserve.

set_memory_reserve() also takes a new parameter that indicates whether the
value passed in should be added to the current reserve or should replace it.

Link: http://lkml.kernel.org/r/1470330729-6273-1-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Suggested-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: Hari Bathini <hbathini@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  • Loading branch information
srikard authored and hnaz committed Sep 1, 2016
1 parent 610d002 commit 61df630
Show file tree
Hide file tree
Showing 3 changed files with 14 additions and 10 deletions.
2 changes: 1 addition & 1 deletion arch/x86/kernel/e820.c
Original file line number Diff line number Diff line change
Expand Up @@ -1188,6 +1188,6 @@ void __init memblock_find_dma_reserve(void)
nr_free_pages += end_pfn - start_pfn;
}

set_dma_reserve(nr_pages - nr_free_pages);
set_memory_reserve(nr_pages - nr_free_pages, false);
#endif
}
2 changes: 1 addition & 1 deletion include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -1913,7 +1913,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void set_memory_reserve(unsigned long nr_reserve, bool inc);
extern void memmap_init_zone(unsigned long, int, unsigned long,
unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
Expand Down
20 changes: 12 additions & 8 deletions mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata nr_memory_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
Expand Down Expand Up @@ -5812,10 +5812,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
}

/* Account for reserved pages */
if (j == 0 && freesize > dma_reserve) {
freesize -= dma_reserve;
if (j == 0 && freesize > nr_memory_reserve) {
freesize -= nr_memory_reserve;
printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
zone_names[0], dma_reserve);
zone_names[0], nr_memory_reserve);
}

if (!is_highmem_idx(j))
Expand Down Expand Up @@ -6501,8 +6501,9 @@ void __init mem_init_print_info(const char *str)
}

/**
* set_dma_reserve - set the specified number of pages reserved in the first zone
* @new_dma_reserve: The number of pages to mark reserved
* set_memory_reserve - set number of pages reserved in the first zone
* @nr_reserve: The number of pages to mark reserved
* @inc: if true, add @nr_reserve to the existing value; if false, replace it.
*
* The per-cpu batchsize and zone watermarks are determined by managed_pages.
* In the DMA zone, a significant percentage may be consumed by kernel image
Expand All @@ -6511,9 +6512,12 @@ void __init mem_init_print_info(const char *str)
* first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
* smaller per-cpu batchsize.
*/
void __init set_dma_reserve(unsigned long new_dma_reserve)
void __init set_memory_reserve(unsigned long nr_reserve, bool inc)
{
dma_reserve = new_dma_reserve;
/* inc=true accumulates nr_reserve onto the running total (multiple callers
 * can each register their reservation); inc=false overwrites it, preserving
 * the old set_dma_reserve() assignment semantics. */
if (inc)
nr_memory_reserve += nr_reserve;
else
nr_memory_reserve = nr_reserve;
}

void __init free_area_init(unsigned long *zones_size)
Expand Down

0 comments on commit 61df630

Please sign in to comment.