diff --git a/app/UserPreferences.m b/app/UserPreferences.m
index 7612f98bba..c7916a7b93 100644
--- a/app/UserPreferences.m
+++ b/app/UserPreferences.m
@@ -7,5 +7,12 @@
 #import "UserPreferences.h"
 #import "fs/proc/ish.h"
+#include "sync.h"
+#include "task.h"
+
+// Externs needed to clean up per-task state when doEnableExtraLocking is disabled. -mke
+extern bool doEnableExtraLocking;
+extern lock_t pids_lock;
+extern struct list alive_pids_list;
 
 // IMPORTANT: If you add a constant here and expose it via UserPreferences,
 // consider if it also needs to be exposed as a friendly preference and included
@@ -438,6 +445,14 @@ - (void)setShouldEnableExtraLocking:(BOOL)dim {
 }
 
 - (BOOL)validateShouldEnableExtraLocking:(id *)value error:(NSError **)error {
+    // Validation runs before the new value is stored, so doEnableExtraLocking
+    // still holds the OLD setting here.  If extra locking is currently on, it
+    // may be about to be turned off: reset every live task's
+    // critical_region.count, since stale nonzero counts make task teardown spin. -mke
+    if (doEnableExtraLocking) {
+        complex_lockt(&pids_lock, 0, __FILE__, __LINE__);
+        zero_critical_regions_count();
+        unlock(&pids_lock);
+    }
     return [*value isKindOfClass:NSNumber.class];
 }
 
diff --git a/kernel/task.c b/kernel/task.c
index d69ea8f588..86584486be 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -88,6 +88,19 @@ dword_t get_count_of_blocked_tasks() {
     return res;
 }
 
+// Reset critical_region.count on every live task and return how many tasks
+// were touched.  Called (with pids_lock held by the caller) when
+// doEnableExtraLocking is switched off; stale nonzero counts would otherwise
+// leave task_destroy() waiting forever. -mke
+dword_t zero_critical_regions_count(void) {
+    dword_t res = 0;
+    struct pid *pid_entry;
+    list_for_each_entry(&alive_pids_list, pid_entry, alive) {
+        pid_entry->task->critical_region.count = 0;
+        res++;
+    }
+    return res;
+}
+
 dword_t get_count_of_alive_tasks() {
     complex_lockt(&pids_lock, 0, __FILE__, __LINE__);
     dword_t res = 0;
@@ -191,9 +204,6 @@ void task_destroy(struct task *task) {
     }
     list_remove(&pid->alive);
 
-    if(Ishould)
-        unlock(&pids_lock);
-
     signal_pending = !!(current->pending & ~current->blocked);
 
     while((critical_region_count(task) >1) || (locks_held_count(task)) || (signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks
@@ -201,6 +211,9 @@
         signal_pending = !!(current->blocked); // Be less stringent -mke
     }
 
+    if(Ishould)
+        unlock(&pids_lock);
+
     free(task);
 }
 
diff --git a/kernel/task.h b/kernel/task.h
index b2e6f2345d..980d96b574 100644
--- a/kernel/task.h
+++ b/kernel/task.h
@@ -204,4 +204,5 @@ struct task *pid_get_task_zombie(dword_t id); // don't return null if the task exists as a zombie
 dword_t get_count_of_blocked_tasks(void);
 dword_t get_count_of_alive_tasks(void);
+dword_t zero_critical_regions_count(void);
 
 #define MAX_PID (1 << 15) // oughta be enough