mm/khugepaged: fix vm_lock/i_mmap_rwsem inversion in retract_page_tables
An internal syzkaller run on linux-next reported a lock inversion caused
by vm_lock being taken after i_mmap_rwsem:

======================================================
WARNING: possible circular locking dependency detected
6.2.0-next-20230301-syzkaller #0 Not tainted
------------------------------------------------------
syz-executor115/5084 is trying to acquire lock:
ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: vma_start_write include/linux/mm.h:678 [inline]
ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: retract_page_tables mm/khugepaged.c:1826 [inline]
ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: collapse_file+0x4fa5/0x5980 mm/khugepaged.c:2204

but task is already holding lock:
ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: i_mmap_lock_write include/linux/fs.h:468 [inline]
ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: retract_page_tables mm/khugepaged.c:1745 [inline]
ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: collapse_file+0x3da6/0x5980 mm/khugepaged.c:2204

retract_page_tables() takes i_mmap_rwsem before the exclusive mmap_lock,
the inverse of the normal locking order. The deadlock is avoided by
try-locking mmap_lock and skipping the VMA when the lock cannot be
obtained. Write-locking the VMA has to follow the same try-lock pattern
to avoid this inversion.
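
To make the cycle concrete, here is a minimal userspace sketch of the two
orderings and the trylock escape. It uses pthreads as stand-ins for the
kernel primitives; all names are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdbool.h>

/* Stand-ins for the two locks in the report above (illustrative only). */
static pthread_rwlock_t vm_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t i_mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Normal order: vm_lock is taken before i_mmap_rwsem. */
static void normal_order(void)
{
	pthread_rwlock_wrlock(&vm_lock);
	pthread_rwlock_wrlock(&i_mmap_rwsem);
	/* ... */
	pthread_rwlock_unlock(&i_mmap_rwsem);
	pthread_rwlock_unlock(&vm_lock);
}

/*
 * Inverted order: i_mmap_rwsem is already held, so a blocking wrlock on
 * vm_lock could deadlock against a task in normal_order(). The only
 * safe move is a trylock that bails out on contention.
 */
static bool inverted_order(void)
{
	bool locked = false;

	pthread_rwlock_wrlock(&i_mmap_rwsem);
	if (pthread_rwlock_trywrlock(&vm_lock) == 0) {
		/* ... */
		locked = true;
		pthread_rwlock_unlock(&vm_lock);
	}
	pthread_rwlock_unlock(&i_mmap_rwsem);
	return locked;	/* false: skip this VMA and let a later pass retry */
}

int main(void)
{
	normal_order();
	return inverted_order() ? 0 : 1;
}

Running this single-threaded is trivially safe; the point is that with two
tasks, a blocking wrlock in inverted_order() can wait forever on a task in
normal_order() that is itself waiting on i_mmap_rwsem.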

Fixes: 44a83f2 ("mm/khugepaged: write-lock VMA while collapsing a huge page")
Reported-by: syzbot+8955a9646d1a48b8be92@syzkaller.appspotmail.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
surenbaghdasaryan authored and intel-lab-lkp committed Mar 3, 2023
1 parent fbcd6c9 commit 338d2ee
Showing 2 changed files with 32 additions and 12 deletions.
39 changes: 28 additions & 11 deletions include/linux/mm.h
@@ -676,33 +676,50 @@ static inline void vma_end_read(struct vm_area_struct *vma)
 	rcu_read_unlock();
 }
 
-static inline void vma_start_write(struct vm_area_struct *vma)
+static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
 {
-	int mm_lock_seq;
-
 	mmap_assert_write_locked(vma->vm_mm);
 
 	/*
 	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
 	 * mm->mm_lock_seq can't be concurrently modified.
 	 */
-	mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
-	if (vma->vm_lock_seq == mm_lock_seq)
+	*mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+	return (vma->vm_lock_seq == *mm_lock_seq);
+}
+
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+	int mm_lock_seq;
+
+	if (__is_vma_write_locked(vma, &mm_lock_seq))
 		return;
 
 	down_write(&vma->vm_lock->lock);
 	vma->vm_lock_seq = mm_lock_seq;
 	up_write(&vma->vm_lock->lock);
 }
 
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+{
+	int mm_lock_seq;
+
+	if (__is_vma_write_locked(vma, &mm_lock_seq))
+		return true;
+
+	if (!down_write_trylock(&vma->vm_lock->lock))
+		return false;
+
+	vma->vm_lock_seq = mm_lock_seq;
+	up_write(&vma->vm_lock->lock);
+	return true;
+}
+
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
-	mmap_assert_write_locked(vma->vm_mm);
-	/*
-	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
-	 * mm->mm_lock_seq can't be concurrently modified.
-	 */
-	VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
+	int mm_lock_seq;
+
+	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 }
 
 static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
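
For readers outside the kernel tree, a compressed userspace model of the
two new helpers may help. This is a sketch using pthread stand-ins;
READ_ONCE(), the mmap_assert_write_locked() check, and the kernel's
memory-ordering guarantees are all elided, and the struct names are
illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Greatly simplified single-threaded model; all names are illustrative. */
struct mm {
	int mm_lock_seq;		/* bumped when mmap_lock is released */
};

struct vma {
	struct mm *mm;
	int vm_lock_seq;		/* equals mm_lock_seq while write-locked */
	pthread_rwlock_t lock;		/* stands in for vma->vm_lock->lock */
};

/* Models __is_vma_write_locked(): a sequence match means already locked. */
static bool is_vma_write_locked(struct vma *vma, int *mm_lock_seq)
{
	*mm_lock_seq = vma->mm->mm_lock_seq;
	return vma->vm_lock_seq == *mm_lock_seq;
}

/* Models vma_try_start_write(): never blocks on the per-VMA lock. */
static bool vma_try_start_write(struct vma *vma)
{
	int mm_lock_seq;

	if (is_vma_write_locked(vma, &mm_lock_seq))
		return true;			/* locked earlier in this cycle */
	if (pthread_rwlock_trywrlock(&vma->lock) != 0)
		return false;			/* contended: the caller skips */
	vma->vm_lock_seq = mm_lock_seq;		/* publish "write-locked" */
	pthread_rwlock_unlock(&vma->lock);	/* held only to publish */
	return true;
}

int main(void)
{
	struct mm mm = { .mm_lock_seq = 1 };
	struct vma vma = { .mm = &mm, .vm_lock_seq = 0 };

	pthread_rwlock_init(&vma.lock, NULL);
	printf("first attempt:  %d\n", vma_try_start_write(&vma));	/* 1 */
	printf("second attempt: %d\n", vma_try_start_write(&vma));	/* 1, seq match */
	pthread_rwlock_destroy(&vma.lock);
	return 0;
}

The design choice worth noting: the per-VMA lock is held only long enough
to publish the sequence number. The VMA then stays logically write-locked
until mm_lock_seq is bumped when mmap_lock is released, which is what lets
both helpers short-circuit via the sequence comparison.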
5 changes: 4 additions & 1 deletion mm/khugepaged.c
@@ -1795,6 +1795,10 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
 		result = SCAN_PTE_MAPPED_HUGEPAGE;
 		if ((cc->is_khugepaged || is_target) &&
 		    mmap_write_trylock(mm)) {
+			/* trylock for the same lock inversion as above */
+			if (!vma_try_start_write(vma))
+				goto unlock_next;
+
 			/*
 			 * Re-check whether we have an ->anon_vma, because
 			 * collapse_and_free_pmd() requires that either no
@@ -1823,7 +1827,6 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
 				result = SCAN_PTE_UFFD_WP;
 				goto unlock_next;
 			}
-			vma_start_write(vma);
 			collapse_and_free_pmd(mm, vma, addr, pmd);
 			if (!cc->is_khugepaged && is_target)
 				result = set_huge_pmd(vma, addr, pmd, hpage);
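
Composed together, the patched path takes every inner lock
opportunistically while i_mmap_rwsem is held. A standalone sketch of that
control flow (again pthread stand-ins with illustrative names; the real
function does this per VMA inside vma_interval_tree_foreach()):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Pthread stand-ins for the three locks in the patched path. */
static pthread_rwlock_t i_mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t vm_lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * With i_mmap_rwsem held, every inner lock is a trylock; any failure
 * skips the VMA instead of blocking, mirroring the patched hunk.
 */
static bool retract_one_vma(void)
{
	bool retracted = false;

	pthread_rwlock_wrlock(&i_mmap_rwsem);
	if (pthread_mutex_trylock(&mmap_lock) != 0)
		goto out;			/* mmap_write_trylock() failed */
	if (pthread_rwlock_trywrlock(&vm_lock) != 0)
		goto unlock_mmap;		/* vma_try_start_write() failed */

	/* ... collapse_and_free_pmd() would run here ... */
	retracted = true;

	pthread_rwlock_unlock(&vm_lock);
unlock_mmap:
	pthread_mutex_unlock(&mmap_lock);
out:
	pthread_rwlock_unlock(&i_mmap_rwsem);
	return retracted;
}

int main(void)
{
	printf("retracted: %s\n", retract_one_vma() ? "yes" : "no");
	return 0;
}

Bailing out on contention is safe for the same reason the existing
mmap_write_trylock() skip is: result has already been set to
SCAN_PTE_MAPPED_HUGEPAGE, so a contended VMA is simply reported as still
PTE-mapped and left for a later attempt rather than risking the deadlock.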
