diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index dfe26fa17e95d7..f895c54c4c651f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -355,14 +355,77 @@ static enum bp_state reserve_additional_memory(void)
 	return BP_ECANCELED;
 }
 
+static struct page *alloc_page_for_balloon(gfp_t gfp)
+{
+	struct page *page;
+
+	page = alloc_page(gfp);
+	if (page == NULL)
+		return NULL;
+
+	adjust_managed_page_count(page, -1);
+	xenmem_reservation_scrub_page(page);
+
+	return page;
+}
+
+static void add_page_to_balloon(struct page *page)
+{
+	xenmem_reservation_va_mapping_reset(1, &page);
+	balloon_append(page);
+}
+
 static void xen_online_page(struct page *page, unsigned int order)
 {
 	unsigned long i, size = (1 << order);
 	unsigned long start_pfn = page_to_pfn(page);
 	struct page *p;
+	struct zone *zone;
 
 	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
 	mutex_lock(&balloon_mutex);
+	zone = page_zone(pfn_to_page(start_pfn));
+
+	/*
+	 * In case a new memory zone is going to be populated, we need to
+	 * ensure at least one page is made available for the memory allocator.
+	 * As the number of pages per zone is updated only after a batch of
+	 * pages has been added, use the number of managed pages as an
+	 * additional indicator for a new zone.
+	 * Otherwise this zone won't be added to the zonelist, resulting in
+	 * the zone's memory not being usable by the kernel.
+	 * Add an already valid page to the balloon and replace it with the
+	 * first page of the new memory chunk being added.
+	 */
+	if (!populated_zone(zone) && !managed_zone(zone)) {
+		xen_pfn_t frame;
+
+		pr_info("Populating new zone\n");
+
+		p = alloc_page_for_balloon(GFP_ATOMIC);
+		if (!p) {
+			pr_err("Failed to allocate replacement balloon page!\n");
+			pr_err("New onlined memory might not be usable.\n");
+		} else {
+			kmap_flush_unused();
+			add_page_to_balloon(p);
+			flush_tlb_all();
+			frame = xen_page_to_gfn(p);
+			xenmem_reservation_decrease(1, &frame);
+			balloon_stats.current_pages--;
+		}
+
+		p = pfn_to_page(start_pfn);
+		frame = page_to_xen_pfn(p);
+		if (xenmem_reservation_increase(1, &frame) > 0) {
+			xenmem_reservation_va_mapping_update(1, &p, &frame);
+			free_reserved_page(p);
+			balloon_stats.current_pages++;
+
+			start_pfn++;
+			size--;
+		}
+	}
 	for (i = 0; i < size; i++) {
 		p = pfn_to_page(start_pfn + i);
 		balloon_append(p);
@@ -452,14 +515,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 		nr_pages = ARRAY_SIZE(frame_list);
 
 	for (i = 0; i < nr_pages; i++) {
-		page = alloc_page(gfp);
+		page = alloc_page_for_balloon(gfp);
 		if (page == NULL) {
 			nr_pages = i;
 			state = BP_EAGAIN;
 			break;
 		}
-		adjust_managed_page_count(page, -1);
-		xenmem_reservation_scrub_page(page);
 		list_add(&page->lru, &pages);
 	}
 
@@ -480,11 +541,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
 		frame_list[i++] = xen_page_to_gfn(page);
 
-		xenmem_reservation_va_mapping_reset(1, &page);
-
 		list_del(&page->lru);
-
-		balloon_append(page);
+		add_page_to_balloon(page);
 	}
 
 	flush_tlb_all();