
[CORE] Inlined spin lock APIs for better performance.

Signed-off-by: Anup Patel <anup@brainfault.org>
commit eb21c4bca53f186e59507f3094eef2c7f61820f3 (1 parent: d776017), @avpatel committed on May 13, 2012
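
For callers, the visible API change in this commit is that vmm_spin_lock_irqsave() becomes a macro that writes the saved interrupt flags into its second argument instead of returning them, which is why every call site in the hunks below changes from flags = vmm_spin_lock_irqsave(&lock) to vmm_spin_lock_irqsave(&lock, flags). A minimal caller-side sketch of the new form follows; demo_update() and its parameters are hypothetical, for illustration only.

#include <vmm_spinlocks.h>

/* Hypothetical caller, shown only to illustrate the new calling
 * convention introduced by this commit.
 */
static void demo_update(vmm_spinlock_t *lock, u32 *value)
{
	irq_flags_t flags;

	/* Old style (removed): flags = vmm_spin_lock_irqsave(lock); */
	vmm_spin_lock_irqsave(lock, flags);

	(*value)++;	/* critical section */

	vmm_spin_unlock_irqrestore(lock, flags);
}
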
@@ -18,12 +18,15 @@
*
* @file vmm_spinlocks.h
* @author Himanshu Chauhan (hchauhan@nulltrace.org)
+ * @author Anup Patel (anup@brainfault.org)
* @brief header file for spinlock synchronization mechanisms.
*/
#ifndef __VMM_SPINLOCKS_H__
#define __VMM_SPINLOCKS_H__
+#include <arch_cpu_irq.h>
+#include <arch_locks.h>
#include <vmm_types.h>
#if defined(CONFIG_SMP)
@@ -34,10 +37,10 @@
* lock.
*/
struct vmm_spinlock {
- spinlock_t __the_lock;
+ spinlock_t __tlock;
};
-#define __SPIN_LOCK_INITIALIZER(_lptr) ARCH_SPIN_LOCK_INIT(&((_lptr)->__the_lock))
+#define __SPIN_LOCK_INITIALIZER(_lptr) ARCH_SPIN_LOCK_INIT(&((_lptr)->__tlock))
#define DEFINE_SPIN_LOCK(_lock) vmm_spinlock_t _lock = __SPIN_LOCK_INITIALIZER(&_lock);
#define DECLARE_SPIN_LOCK(_lock) vmm_spinlock_t _lock;
@@ -46,28 +49,86 @@ struct vmm_spinlock {
#else
struct vmm_spinlock {
- u32 __the_lock;
+ u32 __tlock;
};
-#define INIT_SPIN_LOCK(_lptr) ((_lptr)->__the_lock = 0)
+#define INIT_SPIN_LOCK(_lptr) ((_lptr)->__tlock = 0)
#endif
typedef struct vmm_spinlock vmm_spinlock_t;
-/** Check status of spinlock (TRUE: Locked, FALSE: Unlocked) */
-bool vmm_spin_lock_check(vmm_spinlock_t * lock);
+extern void vmm_scheduler_preempt_disable(void);
+extern void vmm_scheduler_preempt_enable(void);
-/** Lock the spinlock */
-void vmm_spin_lock(vmm_spinlock_t * lock);
+/** Check status of spinlock (TRUE: Locked, FALSE: Unlocked)
+ * PROTOTYPE: bool vmm_spin_lock_check(vmm_spinlock_t * lock)
+ */
+#if defined(CONFIG_SMP)
+#define vmm_spin_lock_check(lock) arch_spin_lock_check((lock)->__tlock)
+#else
+#define vmm_spin_lock_check(lock) FALSE
+#endif
+
+/** Lock the spinlock
+ * PROTOTYPE: void vmm_spin_lock(vmm_spinlock_t * lock)
+ */
+#if defined(CONFIG_SMP)
+#define vmm_spin_lock(lock) do { \
+ vmm_scheduler_preempt_disable(); \
+ arch_spin_lock(&(lock)->__tlock); \
+ } while (0)
+#else
+#define vmm_spin_lock(lock) do { \
+ vmm_scheduler_preempt_disable(); \
+ } while (0)
+#endif
-/** Unlock the spinlock */
-void vmm_spin_unlock(vmm_spinlock_t * lock);
+/** Unlock the spinlock
+ * PROTOTYPE: void vmm_spin_unlock(vmm_spinlock_t * lock)
+ */
+#if defined(CONFIG_SMP)
+#define vmm_spin_unlock(lock) do { \
+ arch_spin_unlock(&(lock)->__tlock); \
+ vmm_scheduler_preempt_enable(); \
+ } while (0)
+#else
+#define vmm_spin_unlock(lock) do { \
+ vmm_scheduler_preempt_enable(); \
+ } while (0)
+#endif
-/** Save irq flags and lock the spinlock */
-irq_flags_t vmm_spin_lock_irqsave(vmm_spinlock_t * lock);
+/** Save irq flags and lock the spinlock
+ * PROTOTYPE: irq_flags_t vmm_spin_lock_irqsave(vmm_spinlock_t * lock)
+ */
+#if defined(CONFIG_SMP)
+#define vmm_spin_lock_irqsave(lock, flags) \
+ do { \
+ flags = arch_cpu_irq_save(); \
+ arch_spin_lock(&(lock)->__tlock); \
+ } while (0)
+#else
+#define vmm_spin_lock_irqsave(lock, flags) \
+ do { \
+ flags = arch_cpu_irq_save(); \
+ } while (0)
+#endif
-/** Unlock the spinlock and restore irq flags */
-void vmm_spin_unlock_irqrestore(vmm_spinlock_t * lock, irq_flags_t flags);
+/** Unlock the spinlock and restore irq flags
+ * PROTOTYPE: void vmm_spin_unlock_irqrestore(vmm_spinlock_t * lock,
+ irq_flags_t flags)
+ */
+#if defined(CONFIG_SMP)
+#define vmm_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ arch_spin_unlock(&(lock)->__tlock); \
+ arch_cpu_irq_restore(flags); \
+ } while (0)
+#else
+#define vmm_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ arch_cpu_irq_restore(flags); \
+ } while (0)
+#endif
#endif /* __VMM_SPINLOCKS_H__ */
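
The header above now carries the full inline implementation. Below is a short usage sketch of the plain (non-IRQ) variants: on UP builds they reduce to scheduler preemption disable/enable, while SMP builds additionally take the architecture spinlock. struct demo_counter and demo_counter_inc() are hypothetical, for illustration only.

#include <vmm_spinlocks.h>

struct demo_counter {
	vmm_spinlock_t lock;	/* protects value; assumed initialized elsewhere */
	u32 value;
};

static void demo_counter_inc(struct demo_counter *c)
{
	vmm_spin_lock(&c->lock);	/* preempt off, plus arch_spin_lock() on SMP */
	c->value++;
	vmm_spin_unlock(&c->lock);	/* arch_spin_unlock() on SMP, then preempt on */
}
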
@@ -25,7 +25,6 @@ core-objs-y+= vmm_main.o
core-objs-y+= vmm_ringbuf.o
core-objs-y+= vmm_stdio.o
core-objs-y+= vmm_string.o
-core-objs-y+= vmm_spinlocks.o
core-objs-y+= vmm_devtree.o
core-objs-y+= vmm_host_irq.o
core-objs-y+= vmm_host_ram.o
@@ -137,7 +137,7 @@ int vmm_host_irq_register(u32 hirq_num,
struct vmm_host_irq *irq;
struct vmm_host_irq_hndl *hirq;
if (hirq_num < ARCH_HOST_IRQ_COUNT) {
- flags = vmm_spin_lock_irqsave(&hirqctrl.lock);
+ vmm_spin_lock_irqsave(&hirqctrl.lock, flags);
irq = &hirqctrl.irq[hirq_num];
found = FALSE;
list_for_each(l, &irq->hndl_list) {
@@ -175,7 +175,7 @@ int vmm_host_irq_unregister(u32 hirq_num,
struct vmm_host_irq *irq;
struct vmm_host_irq_hndl * hirq;
if (hirq_num < ARCH_HOST_IRQ_COUNT) {
- flags = vmm_spin_lock_irqsave(&hirqctrl.lock);
+ vmm_spin_lock_irqsave(&hirqctrl.lock, flags);
irq = &hirqctrl.irq[hirq_num];
found = FALSE;
list_for_each(l, &irq->hndl_list) {
@@ -74,7 +74,7 @@ static int vmm_manager_vcpu_state_change(struct vmm_vcpu *vcpu, u32 new_state)
return rc;
}
- flags = vmm_spin_lock_irqsave(&vcpu->lock);
+ vmm_spin_lock_irqsave(&vcpu->lock, flags);
switch(new_state) {
case VMM_VCPU_STATE_RESET:
if ((vcpu->state != VMM_VCPU_STATE_RESET) &&
@@ -149,7 +149,7 @@ int vmm_manager_vcpu_dumpreg(struct vmm_vcpu * vcpu)
int rc = VMM_EFAIL;
irq_flags_t flags;
if (vcpu) {
- flags = vmm_spin_lock_irqsave(&vcpu->lock);
+ vmm_spin_lock_irqsave(&vcpu->lock, flags);
if (vcpu->state != VMM_VCPU_STATE_RUNNING) {
arch_vcpu_regs_dump(vcpu);
rc = VMM_OK;
@@ -164,7 +164,7 @@ int vmm_manager_vcpu_dumpstat(struct vmm_vcpu * vcpu)
int rc = VMM_EFAIL;
irq_flags_t flags;
if (vcpu) {
- flags = vmm_spin_lock_irqsave(&vcpu->lock);
+ vmm_spin_lock_irqsave(&vcpu->lock, flags);
if (vcpu->state != VMM_VCPU_STATE_RUNNING) {
arch_vcpu_stat_dump(vcpu);
rc = VMM_OK;
@@ -193,7 +193,7 @@ struct vmm_vcpu * vmm_manager_vcpu_orphan_create(const char *name,
}
/* Acquire lock */
- flags = vmm_spin_lock_irqsave(&mngr.lock);
+ vmm_spin_lock_irqsave(&mngr.lock, flags);
/* Find the next available vcpu */
found = 0;
@@ -283,7 +283,7 @@ int vmm_manager_vcpu_orphan_destroy(struct vmm_vcpu * vcpu)
}
/* Acquire lock */
- flags = vmm_spin_lock_irqsave(&mngr.lock);
+ vmm_spin_lock_irqsave(&mngr.lock, flags);
/* Decrement vcpu count */
mngr.vcpu_count--;
@@ -505,7 +505,7 @@ struct vmm_guest * vmm_manager_guest_create(struct vmm_devtree_node * gnode)
}
/* Acquire lock */
- flags = vmm_spin_lock_irqsave(&mngr.lock);
+ vmm_spin_lock_irqsave(&mngr.lock, flags);
/* Ensure guest node uniqueness */
list_for_each(l1, &mngr.guest_list) {
@@ -696,7 +696,7 @@ int vmm_manager_guest_destroy(struct vmm_guest * guest)
vmm_manager_guest_reset(guest);
/* Acquire lock */
- flags = vmm_spin_lock_irqsave(&mngr.lock);
+ vmm_spin_lock_irqsave(&mngr.lock, flags);
/* Decrement guest count */
mngr.guest_count--;
@@ -87,7 +87,7 @@ static void __notrace vmm_profile_enter(void *ip, void *parent_ip)
goto out;
}
- flags = vmm_spin_lock_irqsave(&pctrl.lock);
+ vmm_spin_lock_irqsave(&pctrl.lock, flags);
pctrl.stat[index].counter++;
pctrl.stat[index].is_tracing = 1;
@@ -123,7 +123,7 @@ static void __notrace vmm_profile_exit(void *ip, void *parent_ip)
goto out;
}
- flags = vmm_spin_lock_irqsave(&pctrl.lock);
+ vmm_spin_lock_irqsave(&pctrl.lock, flags);
time = vmm_timer_timestamp();
@@ -1,83 +0,0 @@
-/**
- * Copyright (c) 2010 Himanshu Chauhan.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * @file vmm_spinlocks.c
- * @author Himanshu Chauhan (hchauhan@nulltrace.org)
- * @brief header file for spinlock synchronization mechanisms.
- */
-
-#include <arch_cpu_irq.h>
-#include <arch_locks.h>
-#include <vmm_error.h>
-#include <vmm_scheduler.h>
-#include <vmm_spinlocks.h>
-
-bool __lock vmm_spin_lock_check(vmm_spinlock_t * lock)
-{
-#if defined(CONFIG_SMP)
- /* Call CPU specific locking routine */
- return arch_spin_lock_check(&lock->__the_lock);
-#else
- return FALSE;
-#endif
-}
-
-void __lock vmm_spin_lock(vmm_spinlock_t * lock)
-{
- /* Disable preemption in scheduler */
- vmm_scheduler_preempt_disable();
-#if defined(CONFIG_SMP)
- /* Call CPU specific locking routine */
- arch_spin_lock(&lock->__the_lock);
-#endif
-}
-
-void __lock vmm_spin_unlock(vmm_spinlock_t * lock)
-{
-#if defined(CONFIG_SMP)
- /* Call CPU specific unlocking routine */
- arch_spin_unlock(&lock->__the_lock);
-#endif
- /* Enable preemption in scheduler */
- vmm_scheduler_preempt_enable();
-}
-
-irq_flags_t __lock vmm_spin_lock_irqsave(vmm_spinlock_t * lock)
-{
- irq_flags_t flags;
- /* Disable and save interrupt flags*/
- flags = arch_cpu_irq_save();
-#if defined(CONFIG_SMP)
- /* Call CPU specific locking routine */
- arch_spin_lock(&lock->__the_lock);
-#endif
- /* Return saved interrupt flags*/
- return flags;
-}
-
-void __lock vmm_spin_unlock_irqrestore(vmm_spinlock_t * lock,
- irq_flags_t flags)
-{
-#if defined(CONFIG_SMP)
- /* Call CPU specific unlocking routine */
- arch_spin_unlock(&lock->__the_lock);
-#endif
- /* Restore saved interrupt flags */
- arch_cpu_irq_restore(flags);
-}
-
@@ -162,7 +162,7 @@ struct vmm_thread *vmm_threads_id2thread(u32 tid)
found = FALSE;
/* Lock threads control */
- flags = vmm_spin_lock_irqsave(&thctrl.lock);
+ vmm_spin_lock_irqsave(&thctrl.lock, flags);
list_for_each(l, &thctrl.thread_list) {
ret = list_entry(l, struct vmm_thread, head);
@@ -197,7 +197,7 @@ struct vmm_thread *vmm_threads_index2thread(int index)
found = FALSE;
/* Lock threads control */
- flags = vmm_spin_lock_irqsave(&thctrl.lock);
+ vmm_spin_lock_irqsave(&thctrl.lock, flags);
list_for_each(l, &thctrl.thread_list) {
ret = list_entry(l, struct vmm_thread, head);
@@ -282,7 +282,7 @@ struct vmm_thread *vmm_threads_create(const char *thread_name,
}
/* Lock threads control */
- flags = vmm_spin_lock_irqsave(&thctrl.lock);
+ vmm_spin_lock_irqsave(&thctrl.lock, flags);
list_add_tail(&thctrl.thread_list, &tinfo->head);
thctrl.thread_count++;
@@ -304,7 +304,7 @@ int vmm_threads_destroy(struct vmm_thread * tinfo)
}
/* Lock threads control */
- flags = vmm_spin_lock_irqsave(&thctrl.lock);
+ vmm_spin_lock_irqsave(&thctrl.lock, flags);
list_del(&tinfo->head);
thctrl.thread_count--;
@@ -49,7 +49,7 @@ int vmm_waitqueue_sleep(struct vmm_waitqueue * wq)
vcpu = vmm_scheduler_current_vcpu();
/* Lock waitqueue */
- flags = vmm_spin_lock_irqsave(&wq->lock);
+ vmm_spin_lock_irqsave(&wq->lock, flags);
/* Add VCPU to waitqueue */
list_add_tail(&wq->vcpu_list, &vcpu->wq_head);
@@ -68,7 +68,7 @@ int vmm_waitqueue_sleep(struct vmm_waitqueue * wq)
/* Failed to pause VCPU so remove from waitqueue */
/* Lock waitqueue */
- flags = vmm_spin_lock_irqsave(&wq->lock);
+ vmm_spin_lock_irqsave(&wq->lock, flags);
/* Remove VCPU from waitqueue */
list_del(&vcpu->wq_head);
@@ -105,7 +105,7 @@ int vmm_waitqueue_wake(struct vmm_vcpu * vcpu)
wq = vcpu->wq_priv;
/* Lock waitqueue */
- flags = vmm_spin_lock_irqsave(&wq->lock);
+ vmm_spin_lock_irqsave(&wq->lock, flags);
/* Try to Resume VCPU */
if ((rc = vmm_manager_vcpu_resume(vcpu))) {
@@ -145,7 +145,7 @@ int vmm_waitqueue_wakefirst(struct vmm_waitqueue * wq)
BUG_ON(!wq, "%s: NULL poniter to waitqueue\n", __func__);
/* Lock waitqueue */
- flags = vmm_spin_lock_irqsave(&wq->lock);
+ vmm_spin_lock_irqsave(&wq->lock, flags);
/* Get first VCPU from waitqueue list */
l = list_pop(&wq->vcpu_list);
@@ -186,7 +186,7 @@ int vmm_waitqueue_wakeall(struct vmm_waitqueue * wq)
BUG_ON(!wq, "%s: NULL poniter to waitqueue\n", __func__);
/* Lock waitqueue */
- flags = vmm_spin_lock_irqsave(&wq->lock);
+ vmm_spin_lock_irqsave(&wq->lock, flags);
/* For each VCPU in waitqueue */
wake_count = 0;