Skip to content

Commit

Permalink
vm: Fix vm_wp_page_for_every_region deadlock crash
Browse files Browse the repository at this point in the history
vm_wp_page_for_every_region can be reached through really fun call stacks
such as:

#3  0xffffffff8101a3db in __assert_fail (assertion=<optimized out>, file=<optimized out>, line=<optimized out>, function=<optimized out>) at kernel/panic.cpp:135
#4  0xffffffff810eded7 in mutex_lock_slow_path (mutex=mutex@entry=0xffff804eaa695628, state=state@entry=5) at kernel/sched/mutex.cpp:118
#5  0xffffffff810ee014 in __mutex_lock (state=5, mutex=mutex@entry=0xffff804eaa695628) at kernel/sched/mutex.cpp:143
#6  0xffffffff8105f843 in scoped_mutex<false>::lock (this=0xffff804eaa6af938) at include/onyx/scoped_lock.h:120
#7  scoped_mutex<false>::scoped_mutex (lock=..., this=0xffff804eaa6af938) at include/onyx/scoped_lock.h:141
#8  operator() (region=0xffff804eaa63b300, __closure=0xffff804eaa6af920) at kernel/mm/vm.cpp:3475
#9  vm_object::for_every_mapping<vm_wp_page_for_every_region(page*, size_t, vm_object*)::<lambda(vm_region*)> > (c=..., this=0xffff804eaa6a8500) at include/onyx/mm/vm_object.h:114
#10 vm_wp_page_for_every_region (page=page@entry=0xffffd0001bcb08b8, page_off=0, vmo=0xffff804eaa6a8500) at kernel/mm/vm.cpp:3474
#11 0xffffffff810cc87b in pagecache_set_dirty (dirty=<optimized out>, fo=0xffff804eaa6a34a8) at kernel/fs/pagecache.cpp:61
#12 0xffffffff8106103d in flush::flush_dev::sync_one (this=0xffffffff81208370 <flush::thread_list+336>, obj=obj@entry=0xffff804eaa6a34a8) at kernel/mm/flush.cpp:75
#13 0xffffffff81061e97 in flush_sync_one (obj=obj@entry=0xffff804eaa6a34a8) at kernel/mm/flush.cpp:254
#14 0xffffffff810dc563 in operator() (off=<optimized out>, page=0xffffd0001bcb08b8, __closure=0xffff804eaa6afa1f) at kernel/fs/inode.cpp:303
#15 operator() (idx=<optimized out>, entry=<optimized out>, __closure=0xffff804eaa6afa20) at include/onyx/mm/vm_object.h:134
#16 radix_tree::for_every_entry<vm_object::for_every_page<inode_sync(inode*)::<lambda(page*, long unsigned int)> >(inode_sync(inode*)::<lambda(page*, long unsigned int)>)::<lambda(radix::rt_entry_t, long unsigned int)> > (c=..., this=<optimized out>) at include/onyx/radix.h:258
#17 vm_object::for_every_page<inode_sync(inode*)::<lambda(page*, long unsigned int)> > (c=..., this=<optimized out>) at include/onyx/mm/vm_object.h:133
#18 inode_sync (inode=inode@entry=0xffff804eaa6a8600) at kernel/fs/inode.cpp:298
#19 0xffffffff810dd1d8 in inode_release (inode=0xffff804eaa6a8600) at kernel/fs/inode.cpp:330
#20 0xffffffff810dd26e in inode_unref (ino=<optimized out>) at kernel/fs/inode.cpp:360
#21 0xffffffff810be2f6 in dentry_destroy (d=0xffff804eaa622840) at kernel/fs/dentry.cpp:138
#22 0xffffffff810be425 in dentry_put (d=<optimized out>) at kernel/fs/dentry.cpp:120
#23 0xffffffff810c4909 in fd_put (fd=0xffff804eaa61cea0) at kernel/fs/file.cpp:78
#24 0xffffffff810c4b25 in fd_put (fd=<optimized out>) at kernel/fs/file.cpp:81
#25 0xffffffff8105b700 in vm_region_destroy (region=0xffff804eaa63b300) at kernel/mm/vm.cpp:548
#26 0xffffffff8105b8cf in vm_destroy_area (region=0xffff804eaa63b300) at kernel/mm/vm.cpp:2293
#27 vm_destroy_addr_space (mm=mm@entry=0xffff804eaa695600) at kernel/mm/vm.cpp:2312
#28 0xffffffff8105bb60 in mm_address_space::~mm_address_space (this=0xffff804eaa695600, __in_chrg=<optimized out>) at kernel/mm/vm.cpp:3746
#29 mm_address_space::~mm_address_space (this=0xffff804eaa695600, __in_chrg=<optimized out>) at kernel/mm/vm.cpp:3747
#30 0xffffffff8101dfba in refcountable::unref (this=0xffff804eaa695600) at include/onyx/refcount.h:52
#31 refcountable::unref_multiple (n=<optimized out>, this=0xffff804eaa695600) at include/onyx/refcount.h:46
#32 ref_guard<mm_address_space>::operator= (r=..., this=<optimized out>) at include/onyx/refcount.h:189
#33 process_destroy_aspace () at kernel/process.cpp:758
#34 0xffffffff8101ff31 in process_exit (exit_code=2) at kernel/process.cpp:907
#35 0xffffffff810202bb in process_exit_from_signal (signum=<optimized out>) at kernel/process.cpp:727
#36 0xffffffff810260b9 in signal_default_term (signum=<optimized out>) at kernel/signal.cpp:24
#37 0xffffffff81026607 in do_default_signal (pend=<optimized out>, signum=2) at kernel/signal.cpp:149
#38 do_default_signal (signum=<optimized out>, pend=<optimized out>) at kernel/signal.cpp:133
#39 0xffffffff81026dd4 in deliver_signal (signum=signum@entry=2, pending=pending@entry=0xffff804eaa611860, regs=regs@entry=0xffff804eaa6afec0) at kernel/signal.cpp:283
#40 0xffffffff8102750c in handle_signal (regs=0xffff804eaa6afec0) at kernel/signal.cpp:375
#41 0xffffffff810f912b in syscall_signal_path () at arch/x86_64/entry.S:199

... in the case of I/O writeback. As a temporary hack, since this only
happens in this specific case, do not attempt to take the lock if we
already hold it.

Signed-off-by: Pedro Falcato <pedro.falcato@gmail.com>
  • Loading branch information
heatd committed Jul 19, 2023
1 parent 0c13b0c commit 0e596eb
Showing 1 changed file with 14 additions and 1 deletion.
15 changes: 14 additions & 1 deletion kernel/kernel/mm/vm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3472,7 +3472,17 @@ void vm_wp_page(struct mm_address_space *mm, void *vaddr)
void vm_wp_page_for_every_region(page *page, size_t page_off, vm_object *vmo)
{
vmo->for_every_mapping([page_off](vm_region *region) -> bool {
scoped_mutex g{region->mm->vm_lock};
/* XXX Yuck. We can be called from such stacks such as ~mm_address_space() -> dentry_destroy
* -> inode_release -> inode_sync -> pagecache_set_dirty -> vm_wp_page_for_every_region.
* SHOULDFIX. In this case, it's no problem since we already hold the lock.
*/
bool needs_release = false;
if (!mutex_holds_lock(&region->mm->vm_lock))
{
needs_release = true;
mutex_lock(&region->mm->vm_lock);
}

const size_t mapping_off = (size_t) region->offset;
const size_t mapping_size = region->pages << PAGE_SHIFT;

Expand All @@ -3483,6 +3493,9 @@ void vm_wp_page_for_every_region(page *page, size_t page_off, vm_object *vmo)
vm_wp_page(region->mm, (void *) vaddr);
}

if (needs_release)
mutex_unlock(&region->mm->vm_lock);

return true;
});
}
Expand Down

0 comments on commit 0e596eb

Please sign in to comment.