/* forked from xvisor/xvisor */
/**
* Copyright (c) 2010 Himanshu Chauhan.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* @file vmm_spinlocks.c
* @author Himanshu Chauhan (hchauhan@nulltrace.org)
 * @brief source file for spinlock synchronization mechanisms.
*/
#include <arch_cpu_irq.h>
#include <arch_locks.h>
#include <vmm_error.h>
#include <vmm_scheduler.h>
#include <vmm_spinlocks.h>
/** Report whether @lock is currently held.
 *
 * On uniprocessor (!CONFIG_SMP) builds no CPU ever spins on the
 * lock word, so the lock can never be observed as held and the
 * answer is unconditionally FALSE. On SMP builds the query is
 * delegated to the architecture-specific probe.
 */
bool __lock vmm_spin_lock_check(vmm_spinlock_t * lock)
{
#ifndef CONFIG_SMP
	return FALSE;
#else
	/* Ask the architecture layer about the lock word state */
	return arch_spin_lock_check(&lock->__the_lock);
#endif
}
/** Acquire @lock, blocking scheduler preemption while it is held.
 *
 * Preemption is disabled first so the holder cannot be scheduled
 * out with the lock taken; on SMP builds the CPU then spins on the
 * architecture lock word to exclude other processors.
 */
void __lock vmm_spin_lock(vmm_spinlock_t * lock)
{
	/* Keep the scheduler from preempting us once we hold the lock */
	vmm_scheduler_preempt_disable();
#ifdef CONFIG_SMP
	/* Spin until the architecture lock word is ours */
	arch_spin_lock(&lock->__the_lock);
#endif
}
/** Release @lock and re-enable scheduler preemption.
 *
 * Mirror of vmm_spin_lock(): the architecture lock word is dropped
 * first (SMP builds only), then the preemption count taken at
 * acquire time is given back.
 */
void __lock vmm_spin_unlock(vmm_spinlock_t * lock)
{
#ifdef CONFIG_SMP
	/* Hand the architecture lock word back to other CPUs */
	arch_spin_unlock(&lock->__the_lock);
#endif
	/* Balance the preempt_disable() done in vmm_spin_lock() */
	vmm_scheduler_preempt_enable();
}
/** Acquire @lock with local interrupts masked.
 *
 * Interrupts are disabled before taking the lock; with interrupts
 * off the holder cannot be preempted, so no explicit scheduler
 * preemption handling is needed here.
 *
 * @return the interrupt state in force before masking, to be passed
 *         back to vmm_spin_unlock_irqrestore().
 */
irq_flags_t __lock vmm_spin_lock_irqsave(vmm_spinlock_t * lock)
{
	irq_flags_t saved;

	/* Mask local interrupts, remembering their previous state */
	saved = arch_cpu_irq_save();
#ifdef CONFIG_SMP
	/* Spin on the architecture lock word to exclude other CPUs */
	arch_spin_lock(&lock->__the_lock);
#endif
	/* Hand the pre-lock interrupt state back to the caller */
	return saved;
}
/** Release @lock and restore the saved interrupt state.
 *
 * Mirror of vmm_spin_lock_irqsave(): the architecture lock word is
 * dropped first (SMP builds only), then local interrupts are put
 * back to the state captured at acquire time.
 *
 * @param flags interrupt state previously returned by
 *              vmm_spin_lock_irqsave()
 */
void __lock vmm_spin_unlock_irqrestore(vmm_spinlock_t * lock,
					irq_flags_t flags)
{
#ifdef CONFIG_SMP
	/* Hand the architecture lock word back to other CPUs */
	arch_spin_unlock(&lock->__the_lock);
#endif
	/* Put local interrupts back the way the caller had them */
	arch_cpu_irq_restore(flags);
}