Skip to content
Permalink
Browse files

x86: remove unused, x86-only latency benchmark

We have a multi-architecture latency benchmark now; this one was
x86-only, was never used or compiled in, and is outdated.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
  • Loading branch information...
nashif authored and andrewboie committed Jun 2, 2019
1 parent 34b0516 commit 76d9d7806d7b68f8325ac823fcbcdee18b2618cf
@@ -44,7 +44,6 @@ extern u64_t __idle_time_stamp; /* timestamp when CPU went idle */
*/
void k_cpu_idle(void)
{
z_int_latency_stop();
z_sys_trace_idle();
#if defined(CONFIG_BOOT_TIME_MEASUREMENT)
__idle_time_stamp = (u64_t)k_cycle_get_32();
@@ -75,7 +74,6 @@ void k_cpu_idle(void)

void k_cpu_atomic_idle(unsigned int key)
{
z_int_latency_stop();
z_sys_trace_idle();

__asm__ volatile (
@@ -96,7 +94,6 @@ void k_cpu_atomic_idle(unsigned int key)

/* restore interrupt lockout state before returning to caller */
if ((key & 0x200U) == 0U) {
z_int_latency_start();
__asm__ volatile("cli");
}
}
@@ -37,10 +37,6 @@
#endif


#ifdef CONFIG_INT_LATENCY_BENCHMARK
GTEXT(z_int_latency_start)
GTEXT(z_int_latency_stop)
#endif
/**
*
* @brief Inform the kernel of an interrupt
@@ -135,24 +131,12 @@ SECTION_FUNC(TEXT, _interrupt_enter)
pushl %edi


#if defined(CONFIG_INT_LATENCY_BENCHMARK) || \
defined(CONFIG_TRACING)
#if defined(CONFIG_TRACING)

/* Save these as we are using to keep track of isr and isr_param */
pushl %eax
pushl %edx

#ifdef CONFIG_INT_LATENCY_BENCHMARK
/*
* Volatile registers are now saved it is safe to start measuring
* how long interrupt are disabled.
* The interrupt gate created by IRQ_CONNECT disables the
* interrupt.
*/

call z_int_latency_start
#endif

call z_sys_trace_isr_enter

popl %edx
@@ -193,13 +177,6 @@ SECTION_FUNC(TEXT, _interrupt_enter)
/* fall through to nested case */

alreadyOnIntStack:
#ifdef CONFIG_INT_LATENCY_BENCHMARK
pushl %eax
pushl %edx
call z_int_latency_stop
popl %edx
popl %eax
#endif

#ifndef CONFIG_X86_IAMCU
/* EAX has the interrupt handler argument, needs to go on
@@ -236,10 +213,6 @@ alreadyOnIntStack:
/* irq_controller.h interface */
_irq_controller_eoi_macro

#ifdef CONFIG_INT_LATENCY_BENCHMARK
call z_int_latency_start
#endif

/* determine whether exiting from a nested interrupt */
movl $_kernel, %ecx
decl _kernel_offset_to_nested(%ecx) /* dec interrupt nest count */
@@ -308,9 +281,6 @@ alreadyOnIntStack:
#endif /* CONFIG_LAZY_FP_SHARING */

/* Restore volatile registers and return to the interrupted thread */
#ifdef CONFIG_INT_LATENCY_BENCHMARK
call z_int_latency_stop
#endif
popl %edi
popl %ecx
popl %edx
@@ -344,10 +314,6 @@ noReschedule:
*/

nestedInterrupt:
#ifdef CONFIG_INT_LATENCY_BENCHMARK
call z_int_latency_stop
#endif

popl %edi
popl %ecx /* pop volatile registers in reverse order */
popl %edx
@@ -61,7 +61,6 @@ void z_arch_irq_direct_pm(void)

void z_arch_isr_direct_header(void)
{
z_int_latency_start();
z_sys_trace_isr_enter();

/* We're not going to unlock IRQs, but we still need to increment this
@@ -73,7 +72,6 @@ void z_arch_isr_direct_header(void)
void z_arch_isr_direct_footer(int swap)
{
z_irq_controller_eoi();
z_int_latency_stop();
sys_trace_isr_exit();
--_kernel.nested;

@@ -382,19 +382,6 @@ CROHandlingDone:
/* Utilize the 'eflags' parameter to __swap() */

pushl 4(%esp)
#ifdef CONFIG_INT_LATENCY_BENCHMARK
testl $0x200, (%esp)
jz skipIntLatencyStop

/* save %eax since it used as the return value for __swap */
pushl %eax
/* interrupts are being reenabled, stop accumulating time */
call z_int_latency_stop
/* restore __swap's %eax */
popl %eax

skipIntLatencyStop:
#endif
popfl
#if CONFIG_X86_IAMCU
/* Remember that eflags we stuck into the stack before the return
@@ -30,12 +30,6 @@ static int currently_running_irq = -1;

static inline void vector_to_irq(int irq_nbr, int *may_swap)
{
/*
* As in this architecture an irq (code) executes in 0 time,
* it is a bit senseless to call z_int_latency_start/stop()
*/
/* z_int_latency_start(); */

sys_trace_isr_enter();

if (irq_vector_table[irq_nbr].func == NULL) { /* LCOV_EXCL_BR_LINE */
@@ -59,7 +53,6 @@ static inline void vector_to_irq(int irq_nbr, int *may_swap)
}

sys_trace_isr_exit();
/* z_int_latency_stop(); */
}

/**
@@ -85,11 +85,6 @@ static inline void vector_to_irq(int irq_nbr, int *may_swap)
bs_trace_raw_time(6, "Vectoring to irq %i (%s)\n", irq_nbr,
irqnames[irq_nbr]);

/*
* As in this architecture an irq (code) executes in 0 time,
* it is a bit senseless to call z_int_latency_start/stop()
*/
/* z_int_latency_start(); */
sys_trace_isr_enter();

if (irq_vector_table[irq_nbr].func == NULL) { /* LCOV_EXCL_BR_LINE */
@@ -113,7 +108,6 @@ static inline void vector_to_irq(int irq_nbr, int *may_swap)
}

sys_trace_isr_exit();
/* z_int_latency_stop(); */

bs_trace_raw_time(7, "Irq %i (%s) ended\n", irq_nbr, irqnames[irq_nbr]);
}
@@ -51,14 +51,6 @@ extern "C" {

#ifndef _ASMLANGUAGE

#ifdef CONFIG_INT_LATENCY_BENCHMARK
void z_int_latency_start(void);
void z_int_latency_stop(void);
#else
#define z_int_latency_start() do { } while (false)
#define z_int_latency_stop() do { } while (false)
#endif

/* interrupt/exception/error related definitions */


@@ -422,8 +414,6 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
{
unsigned int key = _do_irq_lock();

z_int_latency_start();

return key;
}

@@ -448,8 +438,6 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
return;
}

z_int_latency_stop();

z_do_irq_unlock();
}

@@ -34,7 +34,6 @@ set_target_properties(
__ZEPHYR_SUPERVISOR__
)

target_sources_ifdef(CONFIG_INT_LATENCY_BENCHMARK kernel PRIVATE int_latency_bench.c)
target_sources_ifdef(CONFIG_STACK_CANARIES kernel PRIVATE compiler_stack_protect.c)
target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timeout.c timer.c)
target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c)
@@ -332,16 +332,6 @@ config BOOT_DELAY
achieved by waiting for DCD on the serial port--however, not
all serial ports have DCD.

config INT_LATENCY_BENCHMARK
bool "Interrupt latency metrics [EXPERIMENTAL]"
depends on ARCH="x86"
help
This option enables the tracking of interrupt latency metrics;
the exact set of metrics being tracked is board-dependent.
Tracking begins when int_latency_init() is invoked by an application.
The metrics are displayed (and a new sampling interval is started)
each time int_latency_show() is called thereafter.

config EXECUTION_BENCHMARKING
bool "Timing metrics"
help

0 comments on commit 76d9d78

Please sign in to comment.
You can’t perform that action at this time.