mm: fix a race on nr_swap_pages
commit b50da6e upstream.

The scenario in which "Free swap = -4kB" shows up on my system is caused by
several get_swap_pages() calls racing with one another while
show_swap_cache_info() runs simultaneously. No lock needs to be added in
get_swap_page_of_type() because we remove the "Presub/PosAdd" pattern there.

ProcessA                        ProcessB                        ProcessC
ngoals = 1                      ngoals = 1
avail = nr_swap_pages(1)        avail = nr_swap_pages(1)
nr_swap_pages(1) -= ngoals
                                nr_swap_pages(0) -= ngoals
                                                                nr_swap_pages = -1
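
The interleaving above is a classic check-then-act race: each caller reads
nr_swap_pages without any serialization, sees a positive value, and then
subtracts. The following minimal user-space sketch (pthreads and C11 atomics
standing in for the kernel's primitives; purely illustrative, not kernel code)
can drive the counter negative in the same way:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long nr_swap_pages = 1;	/* stand-in for the kernel counter */

	static void *claim_page(void *arg)
	{
		(void)arg;
		long avail = atomic_load(&nr_swap_pages);	/* unserialized read */
		if (avail > 0)
			atomic_fetch_sub(&nr_swap_pages, 1);	/* every racer subtracts */
		return NULL;
	}

	int main(void)
	{
		pthread_t t[2];
		for (int i = 0; i < 2; i++)
			pthread_create(&t[i], NULL, claim_page, NULL);
		for (int i = 0; i < 2; i++)
			pthread_join(t[i], NULL);
		/* If both threads read avail == 1 before either subtracts,
		 * this prints -1 -- the user-visible "Free swap = -4kB" symptom. */
		printf("nr_swap_pages = %ld\n", atomic_load(&nr_swap_pages));
		return 0;
	}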

Link: https://lkml.kernel.org/r/1607050340-4535-1-git-send-email-zhaoyang.huang@unisoc.com
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Zhaoyang Huang authored and gregkh committed Jan 30, 2021
1 parent c11f774 commit f472a59
mm/swapfile.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
@@ -1045,16 +1045,18 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 	/* Only single cluster request supported */
 	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
 
+	spin_lock(&swap_avail_lock);
+
 	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
-	if (avail_pgs <= 0)
+	if (avail_pgs <= 0) {
+		spin_unlock(&swap_avail_lock);
 		goto noswap;
+	}
 
 	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
 
 	atomic_long_sub(n_goal * size, &nr_swap_pages);
 
-	spin_lock(&swap_avail_lock);
-
 start_over:
 	node = numa_node_id();
 	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
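
After this hunk, both the availability check and the subtraction happen with
swap_avail_lock held, so two callers can no longer base their subtraction on
the same observed value. A simplified user-space sketch of the resulting
pattern (a pthread mutex stands in for the kernel spinlock, and claim_pages()
is a hypothetical reduction of get_swap_pages(), not the real function):

	#include <pthread.h>
	#include <stdatomic.h>

	static atomic_long nr_swap_pages = 1;
	static pthread_mutex_t swap_avail_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Claim up to n_goal pages; returns how many were actually claimed. */
	static long claim_pages(long n_goal)
	{
		pthread_mutex_lock(&swap_avail_lock);	/* lock taken before the read */
		long avail = atomic_load(&nr_swap_pages);
		if (avail <= 0) {
			pthread_mutex_unlock(&swap_avail_lock);	/* mirrors the new early exit */
			return 0;
		}
		if (n_goal > avail)			/* akin to the min3() clamp */
			n_goal = avail;
		atomic_fetch_sub(&nr_swap_pages, n_goal);	/* subtract while still serialized */
		pthread_mutex_unlock(&swap_avail_lock);
		return n_goal;
	}

The counter stays atomic because other paths (such as the reporting code
behind "Free swap") still read it without taking the lock.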
@@ -1128,14 +1130,13 @@ swp_entry_t get_swap_page_of_type(int type)
 
 	spin_lock(&si->lock);
 	if (si->flags & SWP_WRITEOK) {
-		atomic_long_dec(&nr_swap_pages);
 		/* This is called for allocating swap entry, not cache */
 		offset = scan_swap_map(si, 1);
 		if (offset) {
+			atomic_long_dec(&nr_swap_pages);
 			spin_unlock(&si->lock);
 			return swp_entry(type, offset);
 		}
-		atomic_long_inc(&nr_swap_pages);
 	}
 	spin_unlock(&si->lock);
 fail:
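
This hunk backs the commit message's claim that get_swap_page_of_type() needs
no extra lock: instead of pre-decrementing nr_swap_pages and re-incrementing
on failure (the "Presub/PosAdd" window during which concurrent readers see a
transiently low counter), the decrement now happens only after scan_swap_map()
succeeds, still under si->lock. A hedged before/after sketch of the two
orderings (try_allocate() is a made-up stand-in for scan_swap_map()):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_long nr_swap_pages = 8;

	/* Made-up stand-in for scan_swap_map(): allocation may fail. */
	static long try_allocate(bool succeed) { return succeed ? 42 : 0; }

	static long old_pattern(bool succeed)
	{
		atomic_fetch_sub(&nr_swap_pages, 1);	/* "Presub" */
		long offset = try_allocate(succeed);
		if (!offset)
			atomic_fetch_add(&nr_swap_pages, 1);	/* "PosAdd": between the
								 * two ops the counter is
								 * one too low for readers */
		return offset;
	}

	static long new_pattern(bool succeed)
	{
		long offset = try_allocate(succeed);
		if (offset)
			atomic_fetch_sub(&nr_swap_pages, 1);	/* decrement only on success */
		return offset;
	}

	int main(void)
	{
		old_pattern(false);
		new_pattern(false);
		printf("after two failed attempts: %ld\n",
		       atomic_load(&nr_swap_pages));	/* still 8 */
		return 0;
	}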
