@@ -16,32 +16,32 @@

/**
* @file
* @brief AMD64 MMU context definitions.
* @brief AMD64 MMU context definitions.
*/

#ifndef __X86_MMU_H
#define __X86_MMU_H

/** Definitions of paging structure bits. */
#define X86_PTE_PRESENT (1<<0) /**< Page is present. */
#define X86_PTE_WRITE (1<<1) /**< Page is writable. */
#define X86_PTE_USER (1<<2) /**< Page is accessible in CPL3. */
#define X86_PTE_PWT (1<<3) /**< Page has write-through caching. */
#define X86_PTE_PCD (1<<4) /**< Page has caching disabled. */
#define X86_PTE_ACCESSED (1<<5) /**< Page has been accessed. */
#define X86_PTE_DIRTY (1<<6) /**< Page has been written to. */
#define X86_PTE_LARGE (1<<7) /**< Page is a large page. */
#define X86_PTE_GLOBAL (1<<8) /**< Page won't be cleared in TLB. */
#define X86_PTE_PRESENT (1<<0) /**< Page is present. */
#define X86_PTE_WRITE (1<<1) /**< Page is writable. */
#define X86_PTE_USER (1<<2) /**< Page is accessible in CPL3. */
#define X86_PTE_PWT (1<<3) /**< Page has write-through caching. */
#define X86_PTE_PCD (1<<4) /**< Page has caching disabled. */
#define X86_PTE_ACCESSED (1<<5) /**< Page has been accessed. */
#define X86_PTE_DIRTY (1<<6) /**< Page has been written to. */
#define X86_PTE_LARGE (1<<7) /**< Page is a large page. */
#define X86_PTE_GLOBAL (1<<8) /**< Page won't be cleared in TLB. */
#ifndef __ASM__
# define X86_PTE_NOEXEC (1LL<<63) /**< Page is not executable (requires NX support). */
# define X86_PTE_NOEXEC (1ull<<63) /**< Page is not executable (requires NX support). */
#else
# define X86_PTE_NOEXEC (1<<63) /**< Page is not executable (requires NX support). */
# define X86_PTE_NOEXEC (1<<63)
#endif

/** Protection flag mask. */
#define X86_PTE_PROTECT_MASK (X86_PTE_WRITE | X86_PTE_NOEXEC)
#define X86_PTE_PROTECT_MASK (X86_PTE_WRITE | X86_PTE_NOEXEC)

/** Cacheability flag mask. */
#define X86_PTE_CACHE_MASK (X86_PTE_PWT | X86_PTE_PCD)
#define X86_PTE_CACHE_MASK (X86_PTE_PWT | X86_PTE_PCD)

#endif /* __X86_MMU_H */
@@ -16,7 +16,7 @@

/**
* @file
* @brief x86 SMP definitions.
* @brief x86 SMP definitions.
*/

#ifndef __X86_SMP_H
@@ -25,8 +25,8 @@
struct cpu;

/** x86-specific SMP boot status values. */
#define SMP_BOOT_TSC_SYNC1 4 /**< Stage 1 of TSC synchronization. */
#define SMP_BOOT_TSC_SYNC2 5 /**< Stage 2 of TSC synchronization. */
#define SMP_BOOT_TSC_SYNC1 4 /**< Stage 1 of TSC synchronization. */
#define SMP_BOOT_TSC_SYNC2 5 /**< Stage 2 of TSC synchronization. */

extern void x86_smp_boot_prepare(void);
extern void x86_smp_boot(struct cpu *cpu);
@@ -16,18 +16,19 @@

/**
* @file
* @brief x86 TSC handling functions.
* @brief x86 TSC handling functions.
*/

#ifndef __X86_TSC_H
#define __X86_TSC_H

/** Read the Time Stamp Counter.
* @return Value of the TSC. */
* @return Value of the TSC. */
static inline uint64_t x86_rdtsc(void) {
uint32_t high, low;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return ((uint64_t)high << 32) | low;
uint32_t high, low;

__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return ((uint64_t)high << 32) | low;
}

extern void tsc_init_target(void);

Large diffs are not rendered by default.

Large diffs are not rendered by default.

@@ -16,7 +16,7 @@

/**
* @file
* @brief AMD64 local APIC code.
* @brief AMD64 local APIC code.
*/

#include <arch/io.h>
@@ -40,216 +40,219 @@
KBOOT_BOOLEAN_OPTION("lapic_disabled", "Disable Local APIC usage (disables SMP)", false);

/** Local APIC mapping. If NULL the LAPIC is not present. */
static volatile uint32_t *lapic_mapping = NULL;
static volatile uint32_t *lapic_mapping;

/** Local APIC base address. */
static phys_ptr_t lapic_base = 0;
static phys_ptr_t lapic_base;

/** Read from a register in the current CPU's local APIC.
* @param reg Register to read from.
* @return Value read from register. */
* @param reg Register to read from.
* @return Value read from register. */
static inline uint32_t lapic_read(unsigned reg) {
return lapic_mapping[reg];
return lapic_mapping[reg];
}

/** Write to a register in the current CPU's local APIC.
* @param reg Register to write to.
* @param value Value to write to register. */
* @param reg Register to write to.
* @param value Value to write to register. */
static inline void lapic_write(unsigned reg, uint32_t value) {
lapic_mapping[reg] = value;
lapic_mapping[reg] = value;
}

/** Send an EOI to the local APIC. */
static inline void lapic_eoi(void) {
lapic_write(LAPIC_REG_EOI, 0);
lapic_write(LAPIC_REG_EOI, 0);
}

/** Spurious interrupt handler.
* @param frame Interrupt stack frame. */
* @param frame Interrupt stack frame. */
static void lapic_spurious_interrupt(frame_t *frame) {
kprintf(LOG_DEBUG, "lapic: received spurious interrupt\n");
kprintf(LOG_DEBUG, "lapic: received spurious interrupt\n");
}

/** IPI interrupt handler.
* @param frame Interrupt stack frame. */
* @param frame Interrupt stack frame. */
static void lapic_ipi_interrupt(frame_t *frame) {
smp_ipi_handler();
lapic_eoi();
smp_ipi_handler();
lapic_eoi();
}

/** Prepare local APIC timer tick.
* @param nsecs Number of nanoseconds to tick in. */
* @param nsecs Number of nanoseconds to tick in. */
static void lapic_timer_prepare(nstime_t nsecs) {
uint32_t count = (curr_cpu->arch.lapic_timer_cv * nsecs) >> 32;
lapic_write(LAPIC_REG_TIMER_INITIAL, (count == 0 && nsecs != 0) ? 1 : count);
uint32_t count = (curr_cpu->arch.lapic_timer_cv * nsecs) >> 32;
lapic_write(LAPIC_REG_TIMER_INITIAL, (count == 0 && nsecs != 0) ? 1 : count);
}

/** Local APIC timer device. */
static timer_device_t lapic_timer_device = {
.name = "LAPIC",
.type = TIMER_DEVICE_ONESHOT,
.prepare = lapic_timer_prepare,
.name = "LAPIC",
.type = TIMER_DEVICE_ONESHOT,
.prepare = lapic_timer_prepare,
};

/** Timer interrupt handler.
* @param frame Interrupt stack frame. */
* @param frame Interrupt stack frame. */
static void lapic_timer_interrupt(frame_t *frame) {
curr_cpu->should_preempt = timer_tick();
lapic_eoi();
curr_cpu->should_preempt = timer_tick();
lapic_eoi();
}

/** Return whether the LAPIC enabled.
* @return Whether the LAPIC is enabled. */
* @return Whether the LAPIC is enabled. */
bool lapic_enabled(void) {
return lapic_mapping;
return lapic_mapping;
}

/** Get the current local APIC ID.
* @return Local APIC ID. */
* @return Local APIC ID. */
uint32_t lapic_id(void) {
return (lapic_mapping) ? (lapic_read(LAPIC_REG_APIC_ID) >> 24) : 0;
return (lapic_mapping) ? (lapic_read(LAPIC_REG_APIC_ID) >> 24) : 0;
}

/** Send an IPI.
* @param dest Destination Shorthand.
* @param id Destination local APIC ID (if APIC_IPI_DEST_SINGLE).
* @param mode Delivery Mode.
* @param vector Value of vector field. */
* @param dest Destination Shorthand.
* @param id Destination local APIC ID (if APIC_IPI_DEST_SINGLE).
* @param mode Delivery Mode.
* @param vector Value of vector field. */
void lapic_ipi(uint8_t dest, uint8_t id, uint8_t mode, uint8_t vector) {
bool state;
bool state;

/* Must perform this check to prevent problems if fatal() is called
* before we've initialized the LAPIC. */
if(!lapic_mapping)
return;
/* Must perform this check to prevent problems if fatal() is called before
* we've initialized the LAPIC. */
if (!lapic_mapping)
return;

state = local_irq_disable();
state = local_irq_disable();

/* Write the destination ID to the high part of the ICR. */
lapic_write(LAPIC_REG_ICR1, ((uint32_t)id << 24));
/* Write the destination ID to the high part of the ICR. */
lapic_write(LAPIC_REG_ICR1, ((uint32_t)id << 24));

/* Send the IPI:
* - Destination Mode: Physical.
* - Level: Assert (bit 14).
* - Trigger Mode: Edge. */
lapic_write(LAPIC_REG_ICR0, (1<<14) | ((uint32_t)dest << 18)
| ((uint32_t)mode << 8) | (uint32_t)vector);
/* Send the IPI:
* - Destination Mode: Physical.
* - Level: Assert (bit 14).
* - Trigger Mode: Edge. */
lapic_write(
LAPIC_REG_ICR0,
(1 << 14) | ((uint32_t)dest << 18) | ((uint32_t)mode << 8) | (uint32_t)vector);

/* Wait for the IPI to be sent (check Delivery Status bit). */
while(lapic_read(LAPIC_REG_ICR0) & (1<<12))
arch_cpu_spin_hint();
/* Wait for the IPI to be sent (check Delivery Status bit). */
while (lapic_read(LAPIC_REG_ICR0) & (1 << 12))
arch_cpu_spin_hint();

local_irq_restore(state);
local_irq_restore(state);
}

/** Function to calculate the LAPIC timer frequency.
* @return Calculated frequency. */
* @return Calculated frequency. */
static __init_text uint64_t calculate_lapic_frequency(void) {
uint16_t shi, slo, ehi, elo, pticks;
uint64_t end, lticks;

/* First set the PIT to rate generator mode. */
out8(0x43, 0x34);
out8(0x40, 0xFF);
out8(0x40, 0xFF);

/* Wait for the cycle to begin. */
do {
out8(0x43, 0x00);
slo = in8(0x40);
shi = in8(0x40);
} while(shi != 0xFF);

/* Kick off the LAPIC timer. */
lapic_write(LAPIC_REG_TIMER_INITIAL, 0xFFFFFFFF);

/* Wait for the high byte to drop to 128. */
do {
out8(0x43, 0x00);
elo = in8(0x40);
ehi = in8(0x40);
} while(ehi > 0x80);

/* Get the current timer value. */
end = lapic_read(LAPIC_REG_TIMER_CURRENT);

/* Calculate the differences between the values. */
lticks = 0xFFFFFFFF - end;
pticks = ((ehi << 8) | elo) - ((shi << 8) | slo);

/* Calculate frequency. */
return (lticks * 8 * PIT_BASE_FREQUENCY) / pticks;
uint16_t shi, slo, ehi, elo, pticks;
uint64_t end, lticks;

/* First set the PIT to rate generator mode. */
out8(0x43, 0x34);
out8(0x40, 0xff);
out8(0x40, 0xff);

/* Wait for the cycle to begin. */
do {
out8(0x43, 0x00);
slo = in8(0x40);
shi = in8(0x40);
} while (shi != 0xff);

/* Kick off the LAPIC timer. */
lapic_write(LAPIC_REG_TIMER_INITIAL, 0xffffffff);

/* Wait for the high byte to drop to 128. */
do {
out8(0x43, 0x00);
elo = in8(0x40);
ehi = in8(0x40);
} while (ehi > 0x80);

/* Get the current timer value. */
end = lapic_read(LAPIC_REG_TIMER_CURRENT);

/* Calculate the differences between the values. */
lticks = 0xffffffff - end;
pticks = ((ehi << 8) | elo) - ((shi << 8) | slo);

/* Calculate frequency. */
return (lticks * 8 * PIT_BASE_FREQUENCY) / pticks;
}

/** Initialize the local APIC. */
__init_text void lapic_init(void) {
uint64_t base;

/* Don't do anything if we don't have LAPIC support or have been asked
* not to use the LAPIC. */
if(!cpu_features.apic || kboot_boolean_option("lapic_disabled"))
return;

/* Get the base address of the LAPIC mapping. If bit 11 is 0, the LAPIC
* is disabled. */
base = x86_read_msr(X86_MSR_APIC_BASE);
if(!(base & (1<<11))) {
return;
} else if(cpu_features.x2apic && base & (1<<10)) {
fatal("Cannot handle LAPIC in x2APIC mode");
}

base &= 0xFFFFF000;

/* Map the LAPIC into virtual memory and register interrupt handlers. */
lapic_base = base;
lapic_mapping = phys_map(base, PAGE_SIZE, MM_BOOT);
kprintf(LOG_NOTICE, "lapic: physical location 0x%" PRIxPHYS ", mapped to %p\n",
base, lapic_mapping);

/* Install the LAPIC timer device. */
timer_device_set(&lapic_timer_device);

/* Install interrupt vectors. */
interrupt_table[LAPIC_VECT_SPURIOUS] = lapic_spurious_interrupt;
interrupt_table[LAPIC_VECT_TIMER] = lapic_timer_interrupt;
interrupt_table[LAPIC_VECT_IPI] = lapic_ipi_interrupt;
uint64_t base;

/* Don't do anything if we don't have LAPIC support or have been asked not
* to use the LAPIC. */
if (!cpu_features.apic || kboot_boolean_option("lapic_disabled"))
return;

/* Get the base address of the LAPIC mapping. If bit 11 is 0, the LAPIC is
* disabled. */
base = x86_read_msr(X86_MSR_APIC_BASE);
if (!(base & (1 << 11))) {
return;
} else if (cpu_features.x2apic && base & (1 << 10)) {
fatal("Cannot handle LAPIC in x2APIC mode");
}

base &= 0xfffff000;

/* Map the LAPIC into virtual memory and register interrupt handlers. */
lapic_base = base;
lapic_mapping = phys_map(base, PAGE_SIZE, MM_BOOT);
kprintf(
LOG_NOTICE, "lapic: physical location 0x%" PRIxPHYS ", mapped to %p\n",
base, lapic_mapping);

/* Install the LAPIC timer device. */
timer_device_set(&lapic_timer_device);

/* Install interrupt vectors. */
interrupt_table[LAPIC_VECT_SPURIOUS] = lapic_spurious_interrupt;
interrupt_table[LAPIC_VECT_TIMER] = lapic_timer_interrupt;
interrupt_table[LAPIC_VECT_IPI] = lapic_ipi_interrupt;
}

/** Initialize the local APIC on the current CPU. */
__init_text void lapic_init_percpu(void) {
if(!lapic_mapping)
return;

/* Enable the local APIC (bit 8) and set the spurious interrupt
* vector in the Spurious Interrupt Vector Register. */
lapic_write(LAPIC_REG_SPURIOUS, LAPIC_VECT_SPURIOUS | (1<<8));
lapic_write(LAPIC_REG_TIMER_DIVIDER, LAPIC_TIMER_DIV8);

/* Calculate LAPIC frequency. See comment about CPU frequency in QEMU
* in arch_cpu_early_init_percpu(), same applies here. */
if(strncmp(curr_cpu->arch.model_name, "QEMU", 4) != 0 || curr_cpu == &boot_cpu) {
curr_cpu->arch.lapic_freq = calculate_frequency(calculate_lapic_frequency);
} else {
curr_cpu->arch.lapic_freq = boot_cpu.arch.lapic_freq;
}

/* Sanity check. */
if(curr_cpu != &boot_cpu) {
if(curr_cpu->id != lapic_id())
fatal("CPU ID mismatch (detected %u, LAPIC %u)", curr_cpu->id, lapic_id());
}

/* Figure out the timer conversion factor. */
curr_cpu->arch.lapic_timer_cv = ((curr_cpu->arch.lapic_freq / 8) << 32) / 1000000000;
kprintf(LOG_NOTICE, "lapic: timer conversion factor for CPU %u is %u (freq: %" PRIu64 "MHz)\n",
curr_cpu->id, curr_cpu->arch.lapic_timer_cv,
curr_cpu->arch.lapic_freq / 1000000);

/* Accept all interrupts. */
lapic_write(LAPIC_REG_TPR, lapic_read(LAPIC_REG_TPR) & 0xFFFFFF00);

/* Enable the timer: interrupt vector, no extra bits = Unmasked/One-shot. */
lapic_write(LAPIC_REG_TIMER_INITIAL, 0);
lapic_write(LAPIC_REG_LVT_TIMER, LAPIC_VECT_TIMER);
if (!lapic_mapping)
return;

/* Enable the local APIC (bit 8) and set the spurious interrupt vector in
* the Spurious Interrupt Vector Register. */
lapic_write(LAPIC_REG_SPURIOUS, LAPIC_VECT_SPURIOUS | (1 << 8));
lapic_write(LAPIC_REG_TIMER_DIVIDER, LAPIC_TIMER_DIV8);

/* Calculate LAPIC frequency. See comment about CPU frequency in QEMU in
* arch_cpu_early_init_percpu(), same applies here. */
if (strncmp(curr_cpu->arch.model_name, "QEMU", 4) != 0 || curr_cpu == &boot_cpu) {
curr_cpu->arch.lapic_freq = calculate_frequency(calculate_lapic_frequency);
} else {
curr_cpu->arch.lapic_freq = boot_cpu.arch.lapic_freq;
}

/* Sanity check. */
if (curr_cpu != &boot_cpu) {
if (curr_cpu->id != lapic_id())
fatal("CPU ID mismatch (detected %u, LAPIC %u)", curr_cpu->id, lapic_id());
}

/* Figure out the timer conversion factor. */
curr_cpu->arch.lapic_timer_cv = ((curr_cpu->arch.lapic_freq / 8) << 32) / 1000000000;
kprintf(
LOG_NOTICE, "lapic: timer conversion factor for CPU %u is %u (freq: %" PRIu64 "MHz)\n",
curr_cpu->id, curr_cpu->arch.lapic_timer_cv,
curr_cpu->arch.lapic_freq / 1000000);

/* Accept all interrupts. */
lapic_write(LAPIC_REG_TPR, lapic_read(LAPIC_REG_TPR) & 0xffffff00);

/* Enable the timer: interrupt vector, no extra bits = Unmasked/One-shot. */
lapic_write(LAPIC_REG_TIMER_INITIAL, 0);
lapic_write(LAPIC_REG_LVT_TIMER, LAPIC_VECT_TIMER);
}
@@ -16,7 +16,7 @@

/**
* @file
* @brief AMD64 kernel linker script.
* @brief AMD64 kernel linker script.
*/

#include <arch/aspace.h>
@@ -27,93 +27,93 @@ OUTPUT_ARCH("i386:x86-64")
OUTPUT_FORMAT("elf64-x86-64")

PHDRS {
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* R_X - Code + read-only data. */
data PT_LOAD FLAGS(6); /* RW_ - Data. */
init PT_LOAD FLAGS(7); /* RWX - Reclaimable init code/data. */
note PT_NOTE FLAGS(0);
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* R_X - Code + read-only data. */
data PT_LOAD FLAGS(6); /* RW_ - Data. */
init PT_LOAD FLAGS(7); /* RWX - Reclaimable init code/data. */
note PT_NOTE FLAGS(0);
}

SECTIONS {
/DISCARD/ : {
*(.note.GNU-stack)
*(.note.gnu.*)
*(.comment*)
}

. = KERNEL_VIRT_BASE;
__text_seg_start = .;

. = . + SIZEOF_HEADERS;
.text : {
__text_start = .;
*(.text)
*(.text.*)
. = ALIGN(PAGE_SIZE);
__text_end = .;
} :text

.notes : {
*(.note.*)
} :text :note

. = ALIGN(PAGE_SIZE);
.rodata : {
__rodata_start = .;
*(.rodata)
*(.rodata.*)
. = ALIGN(PAGE_SIZE);
__rodata_end = .;
} :text

__text_seg_end = .;

/* Align the start of each segment to a large page boundary to allow
* the kernel to be mapped with large pages. */
. = ALIGN(LARGE_PAGE_SIZE);
__data_seg_start = .;

.data : {
__data_start = .;
*(.data)
*(.data.*)
. = ALIGN(PAGE_SIZE);
__data_end = .;
} :data

. = ALIGN(PAGE_SIZE);
.bss : {
__bss_start = .;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(PAGE_SIZE);
__bss_end = .;
} :data

__data_seg_end = .;

. = ALIGN(LARGE_PAGE_SIZE);
__init_seg_start = .;

.init.text : { *(.init.text) } :init

. = ALIGN(PAGE_SIZE);
.init.data : {
*(.init.data)

. = ALIGN(32);
__initcall_start = .;
*(.init.initcalls)
__initcall_end = .;

. = ALIGN(32);
__ap_trampoline_start = .;
*(.init.trampoline)
__ap_trampoline_end = .;

. = ALIGN(PAGE_SIZE);
} :init

__init_seg_end = .;
__end = .;
/DISCARD/ : {
*(.note.GNU-stack)
*(.note.gnu.*)
*(.comment*)
}

. = KERNEL_VIRT_BASE;
__text_seg_start = .;

. = . + SIZEOF_HEADERS;
.text : {
__text_start = .;
*(.text)
*(.text.*)
. = ALIGN(PAGE_SIZE);
__text_end = .;
} :text

.notes : {
*(.note.*)
} :text :note

. = ALIGN(PAGE_SIZE);
.rodata : {
__rodata_start = .;
*(.rodata)
*(.rodata.*)
. = ALIGN(PAGE_SIZE);
__rodata_end = .;
} :text

__text_seg_end = .;

/* Align the start of each segment to a large page boundary to allow the
* kernel to be mapped with large pages. */
. = ALIGN(LARGE_PAGE_SIZE);
__data_seg_start = .;

.data : {
__data_start = .;
*(.data)
*(.data.*)
. = ALIGN(PAGE_SIZE);
__data_end = .;
} :data

. = ALIGN(PAGE_SIZE);
.bss : {
__bss_start = .;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(PAGE_SIZE);
__bss_end = .;
} :data

__data_seg_end = .;

. = ALIGN(LARGE_PAGE_SIZE);
__init_seg_start = .;

.init.text : { *(.init.text) } :init

. = ALIGN(PAGE_SIZE);
.init.data : {
*(.init.data)

. = ALIGN(32);
__initcall_start = .;
*(.init.initcalls)
__initcall_end = .;

. = ALIGN(32);
__ap_trampoline_start = .;
*(.init.trampoline)
__ap_trampoline_end = .;

. = ALIGN(PAGE_SIZE);
} :init

__init_seg_end = .;
__end = .;
}

Large diffs are not rendered by default.

@@ -16,72 +16,72 @@

/**
* @file
* @brief AMD64 non-local jump functions.
* @brief AMD64 non-local jump functions.
*/

#include <x86/asm.h>

/** Context offsets. */
#define JMP_OFF_IP 0 /**< RIP offset. */
#define JMP_OFF_SP 8 /**< RSP offset. */
#define JMP_OFF_BP 16 /**< RBP offset. */
#define JMP_OFF_BX 24 /**< RBX offset. */
#define JMP_OFF_R12 32 /**< R12 offset. */
#define JMP_OFF_R13 40 /**< R13 offset. */
#define JMP_OFF_R14 48 /**< R14 offset. */
#define JMP_OFF_R15 56 /**< R15 offset. */
#define JMP_OFF_IP 0 /**< RIP offset. */
#define JMP_OFF_SP 8 /**< RSP offset. */
#define JMP_OFF_BP 16 /**< RBP offset. */
#define JMP_OFF_BX 24 /**< RBX offset. */
#define JMP_OFF_R12 32 /**< R12 offset. */
#define JMP_OFF_R13 40 /**< R13 offset. */
#define JMP_OFF_R14 48 /**< R14 offset. */
#define JMP_OFF_R15 56 /**< R15 offset. */

FUNCTION_START(initjmp)
movq %rsi, JMP_OFF_IP(%rdi)
movq %rsi, JMP_OFF_IP(%rdi)

/* Save the stack pointer, minus 8 to maintain ABI alignment
* requirements (upon entry to a function, (RSP + 8) % 16 == 0). */
addq %rcx, %rdx
subq $8, %rdx
movq %rdx, JMP_OFF_SP(%rdi)
/* Save the stack pointer, minus 8 to maintain ABI alignment requirements:
* upon entry to a function, (RSP + 8) % 16 == 0. */
addq %rcx, %rdx
subq $8, %rdx
movq %rdx, JMP_OFF_SP(%rdi)

/* Clear frame pointer. */
movq $0, JMP_OFF_BP(%rdi)
ret
/* Clear frame pointer. */
movq $0, JMP_OFF_BP(%rdi)
ret
FUNCTION_END(initjmp)

FUNCTION_START(setjmp)
/* Save instruction/stack pointers. */
movq (%rsp), %rax
movq %rax, JMP_OFF_IP(%rdi)
movq %rsp, JMP_OFF_SP(%rdi)
/* Save instruction/stack pointers. */
movq (%rsp), %rax
movq %rax, JMP_OFF_IP(%rdi)
movq %rsp, JMP_OFF_SP(%rdi)

/* Save callee-save registers. */
movq %rbp, JMP_OFF_BP(%rdi)
movq %rbx, JMP_OFF_BX(%rdi)
movq %r12, JMP_OFF_R12(%rdi)
movq %r13, JMP_OFF_R13(%rdi)
movq %r14, JMP_OFF_R14(%rdi)
movq %r15, JMP_OFF_R15(%rdi)
/* Save callee-save registers. */
movq %rbp, JMP_OFF_BP(%rdi)
movq %rbx, JMP_OFF_BX(%rdi)
movq %r12, JMP_OFF_R12(%rdi)
movq %r13, JMP_OFF_R13(%rdi)
movq %r14, JMP_OFF_R14(%rdi)
movq %r15, JMP_OFF_R15(%rdi)

/* Return 0 from setjmp(). */
xorl %eax, %eax
ret
/* Return 0 from setjmp(). */
xorl %eax, %eax
ret
FUNCTION_END(setjmp)

FUNCTION_START(longjmp)
/* Restore new callee-save registers. */
movq JMP_OFF_R15(%rdi), %r15
movq JMP_OFF_R14(%rdi), %r14
movq JMP_OFF_R13(%rdi), %r13
movq JMP_OFF_R12(%rdi), %r12
movq JMP_OFF_BX(%rdi), %rbx
movq JMP_OFF_BP(%rdi), %rbp
/* Restore new callee-save registers. */
movq JMP_OFF_R15(%rdi), %r15
movq JMP_OFF_R14(%rdi), %r14
movq JMP_OFF_R13(%rdi), %r13
movq JMP_OFF_R12(%rdi), %r12
movq JMP_OFF_BX(%rdi), %rbx
movq JMP_OFF_BP(%rdi), %rbp

/* Restore new instruction/stack pointers. */
movq JMP_OFF_SP(%rdi), %rsp
movq JMP_OFF_IP(%rdi), %rax
movq %rax, (%rsp)
/* Restore new instruction/stack pointers. */
movq JMP_OFF_SP(%rdi), %rsp
movq JMP_OFF_IP(%rdi), %rax
movq %rax, (%rsp)

/* Cause setjmp() to return the specified value (change 0 to 1 if given). */
test %esi, %esi
jnz 1f
inc %esi
1: movl %esi, %eax
ret
/* Cause setjmp() to return the specified value (change 0 to 1 if given). */
test %esi, %esi
jnz 1f
inc %esi
1: movl %esi, %eax
ret
FUNCTION_END(longjmp)
@@ -16,7 +16,7 @@

/**
* @file
* @brief AMD64 SMP support.
* @brief AMD64 SMP support.
*/

#include <arch/barrier.h>
@@ -43,113 +43,114 @@
static mmu_context_t *ap_mmu_context;

/** Page reserved to copy the AP bootstrap code to. */
static phys_ptr_t ap_bootstrap_page = 0;
static phys_ptr_t ap_bootstrap_page;

/** Send an IPI interrupt to a single CPU.
* @param dest Destination CPU ID. */
* @param dest Destination CPU ID. */
void arch_smp_ipi(cpu_id_t dest) {
lapic_ipi(LAPIC_IPI_DEST_SINGLE, (uint32_t)dest, LAPIC_IPI_FIXED, LAPIC_VECT_IPI);
lapic_ipi(LAPIC_IPI_DEST_SINGLE, (uint32_t)dest, LAPIC_IPI_FIXED, LAPIC_VECT_IPI);
}

/** Prepare the SMP boot process. */
__init_text void x86_smp_boot_prepare(void) {
void *mapping;

/* Allocate a low memory page for the trampoline code. */
phys_alloc(PAGE_SIZE, 0, 0, 0, 0x100000, MM_BOOT, &ap_bootstrap_page);

/* Copy the trampoline code to the page reserved by the paging
* initialization code. */
mapping = phys_map(ap_bootstrap_page, PAGE_SIZE, MM_BOOT);
memcpy(mapping, __ap_trampoline_start, __ap_trampoline_end - __ap_trampoline_start);
phys_unmap(mapping, PAGE_SIZE, false);

/* Create a temporary MMU context for APs to use while booting which
* identity maps the bootstrap code at its physical location. */
ap_mmu_context = mmu_context_create(MM_BOOT);
mmu_context_lock(ap_mmu_context);
mmu_context_map(ap_mmu_context, (ptr_t)ap_bootstrap_page,
ap_bootstrap_page,
VM_ACCESS_READ | VM_ACCESS_WRITE | VM_ACCESS_EXECUTE, MM_BOOT);
mmu_context_unlock(ap_mmu_context);
void *mapping;

/* Allocate a low memory page for the trampoline code. */
phys_alloc(PAGE_SIZE, 0, 0, 0, 0x100000, MM_BOOT, &ap_bootstrap_page);

/* Copy the trampoline code to the page reserved by the paging
* initialization code. */
mapping = phys_map(ap_bootstrap_page, PAGE_SIZE, MM_BOOT);
memcpy(mapping, __ap_trampoline_start, __ap_trampoline_end - __ap_trampoline_start);
phys_unmap(mapping, PAGE_SIZE, false);

/* Create a temporary MMU context for APs to use while booting which
* identity maps the bootstrap code at its physical location. */
ap_mmu_context = mmu_context_create(MM_BOOT);
mmu_context_lock(ap_mmu_context);
mmu_context_map(
ap_mmu_context,
(ptr_t)ap_bootstrap_page,
ap_bootstrap_page,
VM_ACCESS_READ | VM_ACCESS_WRITE | VM_ACCESS_EXECUTE,
MM_BOOT);
mmu_context_unlock(ap_mmu_context);
}

/** Start the target CPU and wait until it is alive.
* @param id CPU ID to boot.
* @return Whether the CPU responded in time. */
* @param id CPU ID to boot.
* @return Whether the CPU responded in time. */
static __init_text bool boot_cpu_and_wait(cpu_id_t id) {
nstime_t delay;

/* Send an INIT IPI to the AP to reset its state and delay 10ms. */
lapic_ipi(LAPIC_IPI_DEST_SINGLE, id, LAPIC_IPI_INIT, 0x00);
spin(MSECS2NSECS(10));

/* Send a SIPI. The vector argument specifies where to look for the
* bootstrap code, as the SIPI will start execution from 0x000VV000,
* where VV is the vector specified in the IPI. We don't do what the
* MP Specification says here because QEMU assumes that if a CPU is
* halted (even by the 'hlt' instruction) then it can accept SIPIs.
* If the CPU reaches the idle loop before the second SIPI is sent, it
* will fault. */
lapic_ipi(LAPIC_IPI_DEST_SINGLE, id, LAPIC_IPI_SIPI, ap_bootstrap_page >> 12);
spin(MSECS2NSECS(10));

/* If the CPU is up, then return. */
if(smp_boot_status > SMP_BOOT_INIT)
return true;

/* Send a second SIPI and then check in 10ms intervals to see if it
* has booted. If it hasn't booted after 5 seconds, fail. */
lapic_ipi(LAPIC_IPI_DEST_SINGLE, id, LAPIC_IPI_SIPI, ap_bootstrap_page >> 12);
for(delay = 0; delay < SECS2NSECS(5); delay += MSECS2NSECS(10)) {
if(smp_boot_status > SMP_BOOT_INIT)
return true;

spin(MSECS2NSECS(10));
}

return false;
nstime_t delay;

/* Send an INIT IPI to the AP to reset its state and delay 10ms. */
lapic_ipi(LAPIC_IPI_DEST_SINGLE, id, LAPIC_IPI_INIT, 0x00);
spin(msecs_to_nsecs(10));

/* Send a SIPI. The vector argument specifies where to look for the
* bootstrap code, as the SIPI will start execution from 0x000VV000, where
* VV is the vector specified in the IPI. We don't do what the MP
* Specification says here because QEMU assumes that if a CPU is halted
* (even by the 'hlt' instruction) then it can accept SIPIs. If the CPU
* reaches the idle loop before the second SIPI is sent, it will fault. */
lapic_ipi(LAPIC_IPI_DEST_SINGLE, id, LAPIC_IPI_SIPI, ap_bootstrap_page >> 12);
spin(msecs_to_nsecs(10));

/* If the CPU is up, then return. */
if (smp_boot_status > SMP_BOOT_INIT)
return true;

/* Send a second SIPI and then check in 10ms intervals to see if it has
* booted. If it hasn't booted after 5 seconds, fail. */
lapic_ipi(LAPIC_IPI_DEST_SINGLE, id, LAPIC_IPI_SIPI, ap_bootstrap_page >> 12);
for (delay = 0; delay < secs_to_nsecs(5); delay += msecs_to_nsecs(10)) {
if (smp_boot_status > SMP_BOOT_INIT)
return true;

spin(msecs_to_nsecs(10));
}

return false;
}

/** Boot a secondary CPU.
* @param cpu CPU to boot. */
* @param cpu CPU to boot. */
__init_text void x86_smp_boot(cpu_t *cpu) {
void *mapping;

kprintf(LOG_DEBUG, "cpu: booting CPU %" PRIu32 "...\n", cpu->id);
assert(lapic_enabled());

/* Allocate a double fault stack for the new CPU. This is also used as
* the initial stack while initializing the AP, before it enters the
* scheduler. */
cpu->arch.double_fault_stack = kmem_alloc(KSTACK_SIZE, MM_BOOT);

/* Fill in details required by the bootstrap code. */
mapping = phys_map(ap_bootstrap_page, PAGE_SIZE, MM_BOOT);
*(uint64_t *)(mapping + 16) = (ptr_t)kmain_secondary;
*(uint64_t *)(mapping + 24) = (ptr_t)cpu;
*(uint64_t *)(mapping + 32) = (ptr_t)cpu->arch.double_fault_stack + KSTACK_SIZE;
*(uint32_t *)(mapping + 40) = (ptr_t)ap_mmu_context->arch.pml4;
memory_barrier();
phys_unmap(mapping, PAGE_SIZE, false);

/* Kick the CPU into life. */
if(!boot_cpu_and_wait(cpu->id))
fatal("CPU %" PRIu32 " timed out while booting", cpu->id);

/* The TSC of the AP must be synchronised against the boot CPU. */
tsc_init_source();

/* Finally, wait for the CPU to complete its initialization. */
while(smp_boot_status != SMP_BOOT_BOOTED)
arch_cpu_spin_hint();
void *mapping;

kprintf(LOG_DEBUG, "cpu: booting CPU %" PRIu32 "...\n", cpu->id);
assert(lapic_enabled());

/* Allocate a double fault stack for the new CPU. This is also used as the
* initial stack while initializing the AP, before it enters the scheduler. */
cpu->arch.double_fault_stack = kmem_alloc(KSTACK_SIZE, MM_BOOT);

/* Fill in details required by the bootstrap code. */
mapping = phys_map(ap_bootstrap_page, PAGE_SIZE, MM_BOOT);
*(uint64_t *)(mapping + 16) = (ptr_t)kmain_secondary;
*(uint64_t *)(mapping + 24) = (ptr_t)cpu;
*(uint64_t *)(mapping + 32) = (ptr_t)cpu->arch.double_fault_stack + KSTACK_SIZE;
*(uint32_t *)(mapping + 40) = (ptr_t)ap_mmu_context->arch.pml4;
memory_barrier();
phys_unmap(mapping, PAGE_SIZE, false);

/* Kick the CPU into life. */
if (!boot_cpu_and_wait(cpu->id))
fatal("CPU %" PRIu32 " timed out while booting", cpu->id);

/* The TSC of the AP must be synchronised against the boot CPU. */
tsc_init_source();

/* Finally, wait for the CPU to complete its initialization. */
while (smp_boot_status != SMP_BOOT_BOOTED)
arch_cpu_spin_hint();
}

/** Clean up after secondary CPUs have been booted. */
__init_text void x86_smp_boot_cleanup(void) {
/* Destroy the temporary MMU context. */
mmu_context_destroy(ap_mmu_context);
/* Destroy the temporary MMU context. */
mmu_context_destroy(ap_mmu_context);

/* Free the bootstrap page. */
phys_free(ap_bootstrap_page, PAGE_SIZE);
/* Free the bootstrap page. */
phys_free(ap_bootstrap_page, PAGE_SIZE);
}
@@ -16,38 +16,38 @@

/**
* @file
* @brief AMD64 context switch function.
* @brief AMD64 context switch function.
*/

#include <x86/asm.h>

/** Perform a thread context switch.
* @param %rdi New stack pointer.
* @param %rsi Location to store old stack pointer at. */
* @param %rdi New stack pointer.
* @param %rsi Location to store old stack pointer at. */
FUNCTION_START(amd64_context_switch)
/* Just need to save callee-save registers: RBP, RBX, R12-15. */
push %rbp
push %rbx
push %r12
push %r13
push %r14
push %r15
/* Just need to save callee-save registers: RBP, RBX, R12-15. */
push %rbp
push %rbx
push %r12
push %r13
push %r14
push %r15

/* Save the current stack pointer. */
movq %rsp, (%rsi)
/* Save the current stack pointer. */
movq %rsp, (%rsi)
FUNCTION_START(amd64_context_restore)
/* Set stack pointer. */
movq %rdi, %rsp
/* Set stack pointer. */
movq %rdi, %rsp

/* Restore callee-save registers. */
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbx
pop %rbp
/* Restore callee-save registers. */
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbx
pop %rbp

/* Stack pointer points at return address, just ret. */
ret
/* Stack pointer points at return address, just ret. */
ret
FUNCTION_END(amd64_context_switch)
FUNCTION_END(amd64_context_restore)
FUNCTION_END(amd64_context_restore)

Large diffs are not rendered by default.

@@ -16,19 +16,18 @@

/**
* @file
* @brief AMD64 time handling functions.
* @brief AMD64 time handling functions.
*
* @todo Handle systems where the TSC is not invariant. We
* should use the HPET or PIT on such systems.
* @todo Because I'm lazy this is only microsecond resolution
* at the moment. Doing nanosecond resolution requires
* some fixed point maths fun. Something along the lines
* of:
* cv_factor = (cpu_freq << 32) / ns_per_sec;
* time = (tsc << 32) / ns_per_sec;
* The problem with this, however, is that you lose the
* top 32 bits of the TSC, which is really not very
* useful.
* TODO:
* - Handle systems where the TSC is not invariant. We should use the HPET or
* PIT on such systems.
* - Because I'm lazy this is only microsecond resolution at the moment. Doing
* nanosecond resolution requires some fixed point maths fun. Something along
* the lines of:
* cv_factor = (cpu_freq << 32) / ns_per_sec;
* time = (tsc << 32) / ns_per_sec;
* The problem with this, however, is that you lose the top 32 bits of the
* TSC, which is really not very useful.
*/

#include <x86/cpu.h>
@@ -44,89 +43,85 @@
static volatile nstime_t system_time_sync __init_data;

/** Get the system time (number of nanoseconds since boot).
* @return Number of nanoseconds since system was booted. */
* @return Number of nanoseconds since system was booted. */
nstime_t system_time(void) {
uint64_t usecs;
uint64_t usecs;

preempt_disable();
preempt_disable();
usecs = (x86_rdtsc() - curr_cpu->arch.system_time_offset) / curr_cpu->arch.cycles_per_us;
preempt_enable();

usecs = (x86_rdtsc() - curr_cpu->arch.system_time_offset)
/ curr_cpu->arch.cycles_per_us;

preempt_enable();
return USECS2NSECS(usecs);
return usecs_to_nsecs(usecs);
}

/** Spin for a certain amount of time.
* @param nsecs Nanoseconds to spin for. */
* @param nsecs Nanoseconds to spin for. */
void spin(nstime_t nsecs) {
uint64_t usecs, start, target, current, cycles_per_us;
cpu_id_t id;

usecs = NSECS2USECS(nsecs);

preempt_disable();

while(true) {
id = curr_cpu->id;
cycles_per_us = curr_cpu->arch.cycles_per_us;
start = x86_rdtsc();
target = start + (usecs * cycles_per_us);

while(true) {
current = x86_rdtsc();
if(current >= target) {
preempt_enable();
return;
}

preempt_enable();
arch_cpu_spin_hint();
preempt_disable();

/* We may have been migrated to a different CPU.
* Recalculate the target. This can lose accuracy, but
* we can only end up waiting too long rather than not
* long enough. This is acceptable. */
if(id != curr_cpu->id) {
usecs -= (current - start) / cycles_per_us;
break;
}
}
}
uint64_t usecs, start, target, current, cycles_per_us;
cpu_id_t id;

usecs = nsecs_to_usecs(nsecs);

preempt_disable();

while (true) {
id = curr_cpu->id;
cycles_per_us = curr_cpu->arch.cycles_per_us;
start = x86_rdtsc();
target = start + (usecs * cycles_per_us);

while (true) {
current = x86_rdtsc();
if (current >= target) {
preempt_enable();
return;
}

preempt_enable();
arch_cpu_spin_hint();
preempt_disable();

/* We may have been migrated to a different CPU. Recalculate the
* target. This can lose accuracy, but we can only end up waiting
* too long rather than not long enough. This is acceptable. */
if (id != curr_cpu->id) {
usecs -= (current - start) / cycles_per_us;
break;
}
}
}
}

/** Set up the boot time offset. */
__init_text void tsc_init_target(void) {
/* Calculate the offset to subtract from the TSC when calculating the
* system time. For the boot CPU, this is the current value of the TSC,
* so the system time at this point is 0. For other CPUs, we need to
* synchronise against the boot CPU so system_time() reads the same
* value on all CPUs. */
if(curr_cpu == &boot_cpu) {
curr_cpu->arch.system_time_offset = x86_rdtsc();
} else {
/* Tell the boot CPU that we're here. */
smp_boot_status = SMP_BOOT_TSC_SYNC1;

/* Wait for it to store its system_time() value. */
while(smp_boot_status != SMP_BOOT_TSC_SYNC2)
arch_cpu_spin_hint();

/* Calculate the offset we need to use. */
curr_cpu->arch.system_time_offset =
-((NSECS2USECS(system_time_sync) * curr_cpu->arch.cycles_per_us)
- x86_rdtsc());
}
/* Calculate the offset to subtract from the TSC when calculating the
* system time. For the boot CPU, this is the current value of the TSC, so
* the system time at this point is 0. For other CPUs, we need to
* synchronise against the boot CPU so system_time() reads the same value
* on all CPUs. */
if (curr_cpu == &boot_cpu) {
curr_cpu->arch.system_time_offset = x86_rdtsc();
} else {
/* Tell the boot CPU that we're here. */
smp_boot_status = SMP_BOOT_TSC_SYNC1;

/* Wait for it to store its system_time() value. */
while (smp_boot_status != SMP_BOOT_TSC_SYNC2)
arch_cpu_spin_hint();

/* Calculate the offset we need to use. */
curr_cpu->arch.system_time_offset =
-((nsecs_to_usecs(system_time_sync) * curr_cpu->arch.cycles_per_us) - x86_rdtsc());
}
}

/** Boot CPU side of TSC initialization. */
__init_text void tsc_init_source(void) {
/* Wait for the AP to get into tsc_init_target(). */
while(smp_boot_status != SMP_BOOT_TSC_SYNC1)
arch_cpu_spin_hint();
/* Wait for the AP to get into tsc_init_target(). */
while (smp_boot_status != SMP_BOOT_TSC_SYNC1)
arch_cpu_spin_hint();

/* Save our system_time() value. */
system_time_sync = system_time();
smp_boot_status = SMP_BOOT_TSC_SYNC2;
/* Save our system_time() value. */
system_time_sync = system_time();
smp_boot_status = SMP_BOOT_TSC_SYNC2;
}
@@ -16,7 +16,7 @@

/**
* @file
* @brief Kernel console functions.
* @brief Kernel console functions.
*/

#include <io/device.h>
@@ -41,99 +41,96 @@ console_t debug_console;

/** Initialize the debug console. */
__init_text void console_early_init(void) {
kboot_tag_video_t *video = kboot_tag_iterate(KBOOT_TAG_VIDEO, NULL);
kboot_tag_video_t *video = kboot_tag_iterate(KBOOT_TAG_VIDEO, NULL);

platform_console_early_init(video);
platform_console_early_init(video);

if(!main_console.out) {
/* Look for a framebuffer console. */
if(video && video->type == KBOOT_VIDEO_LFB)
fb_console_early_init(video);
}
if (!main_console.out) {
/* Look for a framebuffer console. */
if (video && video->type == KBOOT_VIDEO_LFB)
fb_console_early_init(video);
}
}

/** Initialize the primary console. */
__init_text void console_init(void) {
kboot_tag_video_t *video = kboot_tag_iterate(KBOOT_TAG_VIDEO, NULL);
kboot_tag_video_t *video = kboot_tag_iterate(KBOOT_TAG_VIDEO, NULL);

if(debug_console.out && debug_console.out->init)
debug_console.out->init(video);
if(main_console.out && main_console.out->init)
main_console.out->init(video);
if (debug_console.out && debug_console.out->init)
debug_console.out->init(video);
if (main_console.out && main_console.out->init)
main_console.out->init(video);
}

/*
* Kernel console device functions.
*/

/** Perform I/O on the kernel console device.
* @param device Device to perform I/O on.
* @param handle File handle structure.
* @param request I/O request.
* @return Status code describing result of the operation. */
static status_t kconsole_device_io(device_t *device, file_handle_t *handle,
io_request_t *request)
{
char *buf;
size_t i;
uint16_t ch;
status_t ret;

buf = kmalloc(request->total, MM_USER);
if(!buf)
return STATUS_NO_MEMORY;

if(request->op == IO_OP_WRITE) {
if(!main_console.out) {
ret = STATUS_NOT_SUPPORTED;
goto out;
}

ret = io_request_copy(request, buf, request->total);
if(ret != STATUS_SUCCESS)
goto out;

for(i = 0; i < request->total; i++)
main_console.out->putc(buf[i]);
} else {
if(!main_console.in || !main_console.in->getc) {
ret = STATUS_NOT_SUPPORTED;
goto out;
}

for(i = 0; i < request->total; i++) {
/* TODO: Escape sequences for special keys, nonblock. */
do {
ret = main_console.in->getc(&ch);
if(ret != STATUS_SUCCESS)
goto out;
} while(ch > 0xFF);

buf[i] = ch;
}

ret = io_request_copy(request, buf, request->total);
}
* @param device Device to perform I/O on.
* @param handle File handle structure.
* @param request I/O request.
* @return Status code describing result of the operation. */
static status_t kconsole_device_io(device_t *device, file_handle_t *handle, io_request_t *request) {
char *buf;
size_t i;
uint16_t ch;
status_t ret;

buf = kmalloc(request->total, MM_USER);
if (!buf)
return STATUS_NO_MEMORY;

if (request->op == IO_OP_WRITE) {
if (!main_console.out) {
ret = STATUS_NOT_SUPPORTED;
goto out;
}

ret = io_request_copy(request, buf, request->total);
if (ret != STATUS_SUCCESS)
goto out;

for (i = 0; i < request->total; i++)
main_console.out->putc(buf[i]);
} else {
if (!main_console.in || !main_console.in->getc) {
ret = STATUS_NOT_SUPPORTED;
goto out;
}

for (i = 0; i < request->total; i++) {
/* TODO: Escape sequences for special keys, nonblock. */
do {
ret = main_console.in->getc(&ch);
if (ret != STATUS_SUCCESS)
goto out;
} while (ch > 0xff);

buf[i] = ch;
}

ret = io_request_copy(request, buf, request->total);
}

out:
kfree(buf);
return ret;
kfree(buf);
return ret;
}

/** Kernel console device operations structure. */
static device_ops_t kconsole_device_ops = {
.type = FILE_TYPE_CHAR,
.io = kconsole_device_io,
.type = FILE_TYPE_CHAR,
.io = kconsole_device_io,
};

/** Register the kernel console device. */
static __init_text void console_device_init(void) {
status_t ret;
status_t ret;

ret = device_create("kconsole", device_tree_root, &kconsole_device_ops,
NULL, NULL, 0, NULL);
if(ret != STATUS_SUCCESS)
fatal("Failed to register kernel console device (%d)", ret);
ret = device_create("kconsole", device_tree_root, &kconsole_device_ops, NULL, NULL, 0, NULL);
if (ret != STATUS_SUCCESS)
fatal("Failed to register kernel console device (%d)", ret);
}

INITCALL(console_device_init);

Large diffs are not rendered by default.

@@ -16,7 +16,7 @@

/**
* @file
* @brief CPU management.
* @brief CPU management.
*
* Each CPU in the system is tracked by a cpu_t structure. This contains
* information such as the CPU's ID, its current state, and its current
@@ -38,106 +38,110 @@
cpu_t boot_cpu;

/** Information about all CPUs. */
size_t highest_cpu_id = 0; /**< Highest CPU ID in the system. */
size_t cpu_count = 0; /**< Number of CPUs. */
LIST_DEFINE(running_cpus); /**< List of running CPUs. */
cpu_t **cpus = NULL; /**< Array of CPU structure pointers (index == CPU ID). */
size_t highest_cpu_id; /**< Highest CPU ID in the system. */
size_t cpu_count; /**< Number of CPUs. */
LIST_DEFINE(running_cpus); /**< List of running CPUs. */
cpu_t **cpus; /**< Array of CPU structure pointers (index == CPU ID). */

#if CONFIG_SMP

/** Variable to wait on while waiting for a CPU to boot. */
volatile int cpu_boot_wait = 0;
volatile int cpu_boot_wait;

#endif

/** Initialize a CPU structure.
* @param cpu Structure to initialize.
* @param id ID of the CPU to add.
* @param state State of the CPU. */
* @param cpu Structure to initialize.
* @param id ID of the CPU to add.
* @param state State of the CPU. */
static void cpu_ctor(cpu_t *cpu, cpu_id_t id, int state) {
memset(cpu, 0, sizeof(cpu_t));
list_init(&cpu->header);
cpu->id = id;
cpu->state = state;

#if CONFIG_SMP
/* Initialize SMP call information. */
list_init(&cpu->call_queue);
spinlock_init(&cpu->call_lock, "ipi_lock");
#endif

/* Initialize timer information. */
list_init(&cpu->timers);
spinlock_init(&cpu->timer_lock, "timer_lock");
memset(cpu, 0, sizeof(cpu_t));
list_init(&cpu->header);
cpu->id = id;
cpu->state = state;

#if CONFIG_SMP
/* Initialize SMP call information. */
list_init(&cpu->call_queue);
spinlock_init(&cpu->call_lock, "ipi_lock");
#endif

/* Initialize timer information. */
list_init(&cpu->timers);
spinlock_init(&cpu->timer_lock, "timer_lock");
}

#if CONFIG_SMP

/** Register a non-boot CPU.
* @param id ID of CPU to add.
* @param state Current state of the CPU.
* @return Pointer to CPU structure. */
* @param id ID of CPU to add.
* @param state Current state of the CPU.
* @return Pointer to CPU structure. */
cpu_t *cpu_register(cpu_id_t id, int state) {
cpu_t *cpu;
cpu_t *cpu;

assert(cpus);
assert(cpus);

cpu = kmalloc(sizeof(*cpu), MM_BOOT);
cpu_ctor(cpu, id, state);
cpu = kmalloc(sizeof(*cpu), MM_BOOT);
cpu_ctor(cpu, id, state);

/* Resize the CPU array if required. */
if(id > highest_cpu_id) {
cpus = krealloc(cpus, sizeof(cpu_t *) * (id + 1), MM_BOOT);
memset(&cpus[highest_cpu_id + 1], 0, (id - highest_cpu_id) * sizeof(cpu_t *));
/* Resize the CPU array if required. */
if (id > highest_cpu_id) {
cpus = krealloc(cpus, sizeof(cpu_t *) * (id + 1), MM_BOOT);
memset(&cpus[highest_cpu_id + 1], 0, (id - highest_cpu_id) * sizeof(cpu_t *));

highest_cpu_id = id;
}
highest_cpu_id = id;
}

assert(!cpus[id]);
cpus[id] = cpu;
cpu_count++;
return cpu;
assert(!cpus[id]);
cpus[id] = cpu;
cpu_count++;
return cpu;
}
#endif

#endif /* CONFIG_SMP */

/** Perform early CPU subsystem initialization. */
__init_text void cpu_early_init(void) {
/* The boot CPU is initially assigned an ID of 0. It is later corrected
* once we have the ability to get the real ID. */
cpu_ctor(&boot_cpu, 0, CPU_RUNNING);
/* The boot CPU is initially assigned an ID of 0. It is later corrected once
* we have the ability to get the real ID. */
cpu_ctor(&boot_cpu, 0, CPU_RUNNING);

/* Perform architecture initialization. This initializes some state
* shared between all CPUs. */
arch_cpu_early_init();
/* Perform architecture initialization. This initializes some state shared
* between all CPUs. */
arch_cpu_early_init();

/* We're being called on the boot CPU, initialize that. */
cpu_early_init_percpu(&boot_cpu);
/* We're being called on the boot CPU, initialize that. */
cpu_early_init_percpu(&boot_cpu);
}

/** Perform early per-CPU initialization.
* @param cpu Structure for the current CPU. */
* @param cpu Structure for the current CPU. */
__init_text void cpu_early_init_percpu(cpu_t *cpu) {
arch_cpu_early_init_percpu(cpu);
arch_cpu_early_init_percpu(cpu);

/* Add ourself to the running CPU list. */
cpu->state = CPU_RUNNING;
list_append(&running_cpus, &curr_cpu->header);
/* Add ourself to the running CPU list. */
cpu->state = CPU_RUNNING;
list_append(&running_cpus, &curr_cpu->header);
}

/** Properly initialize the CPU subsystem. */
__init_text void cpu_init(void) {
/* Get the real ID of the boot CPU. */
boot_cpu.id = highest_cpu_id = cpu_id();
cpu_count = 1;
/* Get the real ID of the boot CPU. */
boot_cpu.id = highest_cpu_id = cpu_id();
cpu_count = 1;

/* Create the initial CPU array and add the boot CPU to it. */
cpus = kcalloc(highest_cpu_id + 1, sizeof(cpu_t *), MM_BOOT);
cpus[boot_cpu.id] = &boot_cpu;
/* Create the initial CPU array and add the boot CPU to it. */
cpus = kcalloc(highest_cpu_id + 1, sizeof(cpu_t *), MM_BOOT);
cpus[boot_cpu.id] = &boot_cpu;

arch_cpu_init();
arch_cpu_init();

/* We are called on the boot CPU. */
cpu_init_percpu();
/* We are called on the boot CPU. */
cpu_init_percpu();
}

/** Perform additional per-CPU initialization. */
__init_text void cpu_init_percpu(void) {
arch_cpu_init_percpu();
arch_cpu_init_percpu();
}

Large diffs are not rendered by default.

@@ -16,9 +16,10 @@

/**
* @file
* @brief Deferred procedure call functions.
* @brief Deferred procedure call functions.
*
* @todo Per-CPU DPC thread.
* TODO:
* - Per-CPU DPC thread.
*/

#include <lib/list.h>
@@ -37,9 +38,9 @@

/** Structure describing a DPC request. */
typedef struct dpc_request {
list_t header; /**< Link to requests/free list. */
dpc_function_t function; /**< Function to call. */
void *arg; /**< Argument to pass to handler. */
list_t header; /**< Link to requests/free list. */
dpc_function_t function; /**< Function to call. */
void *arg; /**< Argument to pass to handler. */
} dpc_request_t;

/** Lists of free and pending DPC requests. */
@@ -51,47 +52,47 @@ static SPINLOCK_DEFINE(dpc_lock);
static SEMAPHORE_DEFINE(dpc_request_sem, 0);

/** DPC thread. */
static thread_t *dpc_thread = NULL;
static thread_t *dpc_thread;

/** DPC thread main function.
* @param arg1 Unused.
* @param arg2 Unused. */
* @param arg1 Unused.
* @param arg2 Unused. */
static void dpc_thread_func(void *arg1, void *arg2) {
dpc_request_t *request;

while(true) {
semaphore_down(&dpc_request_sem);

/* Get the next request in the list. */
spinlock_lock(&dpc_lock);
assert(!list_empty(&dpc_requests));
request = list_first(&dpc_requests, dpc_request_t, header);
list_remove(&request->header);
spinlock_unlock(&dpc_lock);

/* Call the function. */
request->function(request->arg);

/* Return the structure to the free list. */
spinlock_lock(&dpc_lock);
list_prepend(&dpc_free, &request->header);
spinlock_unlock(&dpc_lock);
}
dpc_request_t *request;

while (true) {
semaphore_down(&dpc_request_sem);

/* Get the next request in the list. */
spinlock_lock(&dpc_lock);
assert(!list_empty(&dpc_requests));
request = list_first(&dpc_requests, dpc_request_t, header);
list_remove(&request->header);
spinlock_unlock(&dpc_lock);

/* Call the function. */
request->function(request->arg);

/* Return the structure to the free list. */
spinlock_lock(&dpc_lock);
list_prepend(&dpc_free, &request->header);
spinlock_unlock(&dpc_lock);
}
}

/** DPC structure allocator.
* @return Pointer to allocated structure. */
* @return Pointer to allocated structure. */
static dpc_request_t *dpc_request_alloc(void) {
dpc_request_t *request;
dpc_request_t *request;

if(list_empty(&dpc_free)) {
/* TODO: Allocate more before we run out. */
fatal("Out of DPC request structures");
}
if (list_empty(&dpc_free)) {
/* TODO: Allocate more before we run out. */
fatal("Out of DPC request structures");
}

request = list_first(&dpc_free, dpc_request_t, header);
list_remove(&request->header);
return request;
request = list_first(&dpc_free, dpc_request_t, header);
list_remove(&request->header);
return request;
}

/**
@@ -100,50 +101,49 @@ static dpc_request_t *dpc_request_alloc(void) {
* Adds a function to the DPC queue to be called by the DPC thread. This
* function is safe to use from interrupt context.
*
* @param function Function to call.
* @param arg Argument to pass to the function.
* @param function Function to call.
* @param arg Argument to pass to the function.
*/
void dpc_request(dpc_function_t function, void *arg) {
dpc_request_t *request;
dpc_request_t *request;

spinlock_lock(&dpc_lock);
spinlock_lock(&dpc_lock);

request = dpc_request_alloc();
request->function = function;
request->arg = arg;
request = dpc_request_alloc();
request->function = function;
request->arg = arg;

/* Add it to the queue and wake up the DPC thread. */
list_append(&dpc_requests, &request->header);
semaphore_up(&dpc_request_sem, 1);
/* Add it to the queue and wake up the DPC thread. */
list_append(&dpc_requests, &request->header);
semaphore_up(&dpc_request_sem, 1);

spinlock_unlock(&dpc_lock);
spinlock_unlock(&dpc_lock);
}

/** Check whether the DPC system has been initialized.
* @return Whether initialized. */
* @return Whether initialized. */
bool dpc_inited(void) {
return dpc_thread;
return dpc_thread;
}

/** Initialize the DPC thread. */
__init_text void dpc_init(void) {
dpc_request_t *alloc;
status_t ret;
size_t i;

/* Allocate a chunk of DPC structures. We do not allocate a new
* structure upon every dpc_request() call to make it usable from
* interrupt context. */
alloc = kmem_alloc(PAGE_SIZE, MM_BOOT);
for(i = 0; i < (PAGE_SIZE / sizeof(dpc_request_t)); i++) {
list_init(&alloc[i].header);
list_append(&dpc_free, &alloc[i].header);
}

/* Create the DPC thread */
ret = thread_create("dpc", NULL, 0, dpc_thread_func, NULL, NULL, &dpc_thread);
if(ret != STATUS_SUCCESS)
fatal("Failed to create DPC thread: %d\n", ret);

thread_run(dpc_thread);
dpc_request_t *alloc;
status_t ret;
size_t i;

/* Allocate a chunk of DPC structures. We do not allocate a new structure
* upon every dpc_request() call to make it usable from interrupt context. */
alloc = kmem_alloc(PAGE_SIZE, MM_BOOT);
for (i = 0; i < (PAGE_SIZE / sizeof(dpc_request_t)); i++) {
list_init(&alloc[i].header);
list_append(&dpc_free, &alloc[i].header);
}

/* Create the DPC thread */
ret = thread_create("dpc", NULL, 0, dpc_thread_func, NULL, NULL, &dpc_thread);
if (ret != STATUS_SUCCESS)
fatal("Failed to create DPC thread: %d\n", ret);

thread_run(dpc_thread);
}

Large diffs are not rendered by default.

@@ -16,7 +16,7 @@

/**
* @file
* @brief Error handling functions.
* @brief Error handling functions.
*/

#include <arch/frame.h>
@@ -41,18 +41,18 @@
NOTIFIER_DEFINE(fatal_notifier, NULL);

/** Atomic variable to protect against nested calls to fatal(). */
static atomic_t in_fatal = 0;
static atomic_t in_fatal;

/** Helper for fatal_printf(). */
static void fatal_printf_helper(char ch, void *data, int *total) {
if(debug_console.out)
debug_console.out->putc(ch);
if(main_console.out)
main_console.out->putc(ch);
if (debug_console.out)
debug_console.out->putc(ch);
if (main_console.out)
main_console.out->putc(ch);

kboot_log_write(ch);
kboot_log_write(ch);

*total = *total + 1;
*total = *total + 1;
}

/**
@@ -61,38 +61,38 @@ static void fatal_printf_helper(char ch, void *data, int *total) {
* Halts all CPUs, prints a formatted error message to the console and enters
* KDB. The function will never return.
*
* @param frame Interrupt stack frame (if any).
* @param fmt Error message format string.
* @param ... Arguments to substitute into format string.
* @param frame Interrupt stack frame (if any).
* @param fmt Error message format string.
* @param ... Arguments to substitute into format string.
*/
void fatal_etc(frame_t *frame, const char *fmt, ...) {
va_list args;
va_list args;

local_irq_disable();
local_irq_disable();

if(atomic_inc(&in_fatal) == 0) {
/* Run callback functions registered. */
notifier_run_unsafe(&fatal_notifier, NULL, false);
if (atomic_inc(&in_fatal) == 0) {
/* Run callback functions registered. */
notifier_run_unsafe(&fatal_notifier, NULL, false);

do_printf(fatal_printf_helper, NULL, "\nFATAL: ");
va_start(args, fmt);
do_vprintf(fatal_printf_helper, NULL, fmt, args);
va_end(args);
do_printf(fatal_printf_helper, NULL, "\n");
do_printf(fatal_printf_helper, NULL, "\nFATAL: ");
va_start(args, fmt);
do_vprintf(fatal_printf_helper, NULL, fmt, args);
va_end(args);
do_printf(fatal_printf_helper, NULL, "\n");

kdb_enter(KDB_REASON_FATAL, frame);
}
kdb_enter(KDB_REASON_FATAL, frame);
}

/* Halt the current CPU. */
arch_cpu_halt();
/* Halt the current CPU. */
arch_cpu_halt();
}

/** Handle failure of an assertion.
* @param cond String of the condition that failed.
* @param file File name that contained the assertion.
* @param line Line number of the assertion. */
* @param cond String of the condition that failed.
* @param file File name that contained the assertion.
* @param line Line number of the assertion. */
void __assert_fail(const char *cond, const char *file, int line) {
fatal("Assertion `%s' failed\nat %s:%d", cond, file, line);
fatal("Assertion `%s' failed\nat %s:%d", cond, file, line);
}

/**
@@ -101,16 +101,16 @@ void __assert_fail(const char *cond, const char *file, int line) {
* Prints a fatal error message and halts the system. The calling process must
* have the PRIV_FATAL privilege.
*
* @param message Message to print.
* @param message Message to print.
*/
void kern_system_fatal(const char *message) {
char *kmessage;
char *kmessage;

if(!security_check_priv(PRIV_FATAL))
return;
if (!security_check_priv(PRIV_FATAL))
return;

if(strdup_from_user(message, &kmessage) != STATUS_SUCCESS)
return;
if (strdup_from_user(message, &kmessage) != STATUS_SUCCESS)
return;

fatal("%s", message);
fatal("%s", message);
}
@@ -16,7 +16,7 @@

/**
* @file
* @brief Assertion function.
* @brief Assertion function.
*/

#ifndef __ASSERT_H
@@ -27,18 +27,21 @@
#if CONFIG_DEBUG

/** Raise a fatal error if the given condition is not met.
* @param cond Condition to test. */
#define assert(cond) \
if(unlikely(!(cond))) { __assert_fail(#cond, __FILE__, __LINE__); }
* @param cond Condition to test. */
#define assert(cond) \
if (unlikely(!(cond))) \
__assert_fail(#cond, __FILE__, __LINE__);

#else
#define assert(cond) ((void)0)

#define assert(cond) ((void)0)

#endif

extern void __assert_fail(const char *cond, const char *file, int line) __noreturn;

#ifndef __cplusplus
#define static_assert(cond, err) _Static_assert(cond, err)
# define static_assert(cond, err) _Static_assert(cond, err)
#endif

#endif /* __ASSERT_H */
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2009-2013 Alex Smith
* Copyright (C) 2009-2015 Alex Smith
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -16,45 +16,48 @@

/**
* @file
* @brief Compiler-specific macros/definitions.
* @brief Compiler-specific macros/definitions.
*/

#ifndef __COMPILER_H
#define __COMPILER_H

#include <arch/cache.h>

#ifdef __GNUC__
# define __unused __attribute__((unused))
# define __used __attribute__((used))
# define __packed __attribute__((packed))
# define __aligned(a) __attribute__((aligned(a)))
# define __noreturn __attribute__((noreturn))
# define __malloc __attribute__((malloc))
# define __printf(a, b) __attribute__((format(printf, a, b)))
# define __deprecated __attribute__((deprecated))
# define __always_inline __attribute__((always_inline))
# ifdef __clang_analyzer__
# define __init_text
# define __init_data
# define __section(s)
# define __export
# else
# define __init_text __attribute__((section(".init.text")))
# define __init_data __attribute__((section(".init.data")))
# define __section(s) __attribute__((section(s)))
# define __hidden __attribute__((visibility("hidden")))
# define __export __attribute__((visibility("default")))
# endif
# define __cacheline_aligned __aligned(CPU_CACHE_SIZE)
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
# define compiler_barrier() __asm__ volatile("" ::: "memory")
#ifndef __GNUC__
# error "Must be compiled with a GCC-compatible compiler"
#endif

#define __unused __attribute__((unused))
#define __used __attribute__((used))
#define __packed __attribute__((packed))
#define __aligned(a) __attribute__((aligned(a)))
#define __noreturn __attribute__((noreturn))
#define __malloc __attribute__((malloc))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __deprecated __attribute__((deprecated))
#define __always_inline __attribute__((always_inline))
#define __cacheline_aligned __aligned(CPU_CACHE_SIZE)

#ifdef __clang_analyzer__
# define __init_text
# define __init_data
# define __section(s)
# define __export
#else
# error "Kiwi does not currently support compilers other than GCC"
# define __init_text __attribute__((section(".init.text")))
# define __init_data __attribute__((section(".init.data")))
# define __section(s) __attribute__((section(s)))
# define __hidden __attribute__((visibility("hidden")))
# define __export __attribute__((visibility("default")))
#endif

#define STRINGIFY(val) #val
#define XSTRINGIFY(val) STRINGIFY(val)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define compiler_barrier() __asm__ volatile("" ::: "memory")
#define unreachable() __builtin_unreachable()

#define STRINGIFY(val) #val
#define XSTRINGIFY(val) STRINGIFY(val)

#endif /* __COMPILER_H */
@@ -16,7 +16,7 @@

/**
* @file
* @brief Kernel console functions.
* @brief Kernel console functions.
*/

#ifndef __CONSOLE_H
@@ -36,39 +36,38 @@ struct kboot_tag_video;
* called without, so they should perform locking themselves.
*/
typedef struct console_out_ops {
/** Properly initialize the console after memory management setup.
* @param video KBoot video tag. */
void (*init)(struct kboot_tag_video *video);
/** Properly initialize the console after memory management setup.
* @param video KBoot video tag. */
void (*init)(struct kboot_tag_video *video);

/** Write a character to the console.
* @param ch Character to write. */
void (*putc)(char ch);
/** Write a character to the console.
* @param ch Character to write. */
void (*putc)(char ch);
} console_out_ops_t;

/** Kernel console input operations structure. */
typedef struct console_in_ops {
/** Check for a character from the console.
* @note This function must be safe to use from interrupt
* context.
* @return Character read, or 0 if none available. */
uint16_t (*poll)(void);

/** Read a character from the console, blocking until it can do so.
* @param ch Where to store character read.
* @return Status code describing the result of the
* operation. */
status_t (*getc)(uint16_t *chp);
/** Check for a character from the console.
* @note This function must be safe to use from interrupt
* context.
* @return Character read, or 0 if none available. */
uint16_t (*poll)(void);

/** Read a character from the console, blocking until it can do so.
* @param _ch Where to store character read.
* @return Status code describing the result of the operation. */
status_t (*getc)(uint16_t *_ch);
} console_in_ops_t;

/** Special console key definitions. */
#define CONSOLE_KEY_UP 0x100
#define CONSOLE_KEY_DOWN 0x101
#define CONSOLE_KEY_LEFT 0x102
#define CONSOLE_KEY_RIGHT 0x103
#define CONSOLE_KEY_HOME 0x104
#define CONSOLE_KEY_END 0x105
#define CONSOLE_KEY_PGUP 0x106
#define CONSOLE_KEY_PGDN 0x107
#define CONSOLE_KEY_UP 0x100
#define CONSOLE_KEY_DOWN 0x101
#define CONSOLE_KEY_LEFT 0x102
#define CONSOLE_KEY_RIGHT 0x103
#define CONSOLE_KEY_HOME 0x104
#define CONSOLE_KEY_END 0x105
#define CONSOLE_KEY_PGUP 0x106
#define CONSOLE_KEY_PGDN 0x107

/**
* Kernel console structure.
@@ -82,8 +81,8 @@ typedef struct console_in_ops {
* while input is handled by the input driver.
*/
typedef struct console {
console_out_ops_t *out; /**< Output operations. */
console_in_ops_t *in; /**< Input operations. */
console_out_ops_t *out; /**< Output operations. */
console_in_ops_t *in; /**< Input operations. */
} console_t;

extern console_t main_console;
@@ -96,17 +95,17 @@ extern void console_init(void);

/** Framebuffer information structure. */
typedef struct fb_info {
uint16_t width; /**< Width of the framebuffer. */
uint16_t height; /**< Height of the framebuffer. */
uint8_t depth; /**< Colour depth of the framebuffer (bits per pixel). */
uint8_t bytes_per_pixel; /**< Bytes per pixel. */
uint8_t red_position; /**< Red field position. */
uint8_t red_size; /**< Red field size. */
uint8_t green_position; /**< Green field position. */
uint8_t green_size; /**< Green field size. */
uint8_t blue_position; /**< Blue field position. */
uint8_t blue_size; /**< Blue field size. */
phys_ptr_t addr; /**< Physical address of the framebuffer. */
uint16_t width; /**< Width of the framebuffer. */
uint16_t height; /**< Height of the framebuffer. */
uint8_t depth; /**< Colour depth of the framebuffer (bits per pixel). */
uint8_t bytes_per_pixel; /**< Bytes per pixel. */
uint8_t red_position; /**< Red field position. */
uint8_t red_size; /**< Red field size. */
uint8_t green_position; /**< Green field position. */
uint8_t green_size; /**< Green field size. */
uint8_t blue_position; /**< Blue field position. */
uint8_t blue_size; /**< Blue field size. */
phys_ptr_t addr; /**< Physical address of the framebuffer. */
} fb_info_t;

extern void fb_console_info(fb_info_t *info);
@@ -16,7 +16,7 @@

/**
* @file
* @brief CPU management.
* @brief CPU management.
*/

#ifndef __CPU_H
@@ -33,36 +33,36 @@ struct vm_aspace;

/** Structure describing a CPU. */
typedef struct cpu {
list_t header; /**< Link to running CPUs list. */

cpu_id_t id; /**< ID of the CPU. */
arch_cpu_t arch; /**< Architecture-specific information. */

/** Current state of the CPU. */
enum {
CPU_OFFLINE, /**< Offline. */
CPU_RUNNING, /**< Running. */
} state;

/** Scheduler information. */
struct sched_cpu *sched; /**< Scheduler run queues/timers. */
struct thread *thread; /**< Currently executing thread. */
struct vm_aspace *aspace; /**< Address space currently in use. */
bool should_preempt; /**< Whether the CPU should be preempted. */
bool idle; /**< Whether the CPU is idle. */

/** Timer information. */
list_t timers; /**< List of active timers. */
bool timer_enabled; /**< Whether the timer device is enabled. */
spinlock_t timer_lock; /**< Timer list lock. */

#if CONFIG_SMP
/** SMP call information. */
list_t call_queue; /**< List of calls queued to this CPU. */
bool ipi_sent; /**< Whether an IPI has been sent to the CPU. */
struct smp_call *curr_call; /**< SMP call currently being handled. */
spinlock_t call_lock; /**< Lock to protect call queue. */
#endif
list_t header; /**< Link to running CPUs list. */

cpu_id_t id; /**< ID of the CPU. */
arch_cpu_t arch; /**< Architecture-specific information. */

/** Current state of the CPU. */
enum {
CPU_OFFLINE, /**< Offline. */
CPU_RUNNING, /**< Running. */
} state;

/** Scheduler information. */
struct sched_cpu *sched; /**< Scheduler run queues/timers. */
struct thread *thread; /**< Currently executing thread. */
struct vm_aspace *aspace; /**< Address space currently in use. */
bool should_preempt; /**< Whether the CPU should be preempted. */
bool idle; /**< Whether the CPU is idle. */

/** Timer information. */
list_t timers; /**< List of active timers. */
bool timer_enabled; /**< Whether the timer device is enabled. */
spinlock_t timer_lock; /**< Timer list lock. */

#if CONFIG_SMP
/** SMP call information. */
list_t call_queue; /**< List of calls queued to this CPU. */
bool ipi_sent; /**< Whether an IPI has been sent to the CPU. */
struct smp_call *curr_call; /**< SMP call currently being handled. */
spinlock_t call_lock; /**< Lock to protect call queue. */
#endif
} cpu_t;

/**
@@ -73,7 +73,7 @@ typedef struct cpu {
* cannot be migrated to a different CPU, i.e. preemption or interrupts
* disabled.
*/
#define curr_cpu (arch_curr_cpu())
#define curr_cpu (arch_curr_cpu())

extern cpu_t boot_cpu;
extern size_t highest_cpu_id;
@@ -16,7 +16,7 @@

/**
* @file
* @brief Interrupt handling code.
* @brief Interrupt handling code.
*/

#ifndef __DEVICE_IRQ_H
@@ -26,52 +26,52 @@

/** IRQ handler return status. */
typedef enum irq_status {
IRQ_UNHANDLED, /**< Interrupt was not handled. */
IRQ_HANDLED, /**< Interrupt was handled. */
IRQ_PREEMPT, /**< Interrupt was handled, and the current thread should be preempted. */
IRQ_RUN_THREAD, /**< Interrupt was handled, and the threaded handler should be run. */
IRQ_UNHANDLED, /**< Interrupt was not handled. */
IRQ_HANDLED, /**< Interrupt was handled. */
IRQ_PREEMPT, /**< Interrupt was handled, current thread should be preempted. */
IRQ_RUN_THREAD, /**< Interrupt was handled, threaded handler should be run. */
} irq_status_t;

/** IRQ trigger modes. */
typedef enum irq_mode {
IRQ_MODE_LEVEL, /**< Level-triggered. */
IRQ_MODE_EDGE, /**< Edge-triggered. */
IRQ_MODE_LEVEL, /**< Level-triggered. */
IRQ_MODE_EDGE, /**< Edge-triggered. */
} irq_mode_t;

/** IRQ controller structure. */
typedef struct irq_controller {
/** Pre-handling function.
* @param num IRQ number.
* @return True if IRQ should be handled. */
bool (*pre_handle)(unsigned num);

/** Post-handling function.
* @param num IRQ number. */
void (*post_handle)(unsigned num);

/** Get IRQ trigger mode.
* @param num IRQ number.
* @return Trigger mode of the IRQ. */
irq_mode_t (*mode)(unsigned num);

/** Enable an IRQ.
* @param num IRQ number. */
void (*enable)(unsigned num);

/** Disable an IRQ.
* @param num IRQ number. */
void (*disable)(unsigned num);
/** Pre-handling function.
* @param num IRQ number.
* @return True if IRQ should be handled. */
bool (*pre_handle)(unsigned num);

/** Post-handling function.
* @param num IRQ number. */
void (*post_handle)(unsigned num);

/** Get IRQ trigger mode.
* @param num IRQ number.
* @return Trigger mode of the IRQ. */
irq_mode_t (*mode)(unsigned num);

/** Enable an IRQ.
* @param num IRQ number. */
void (*enable)(unsigned num);

/** Disable an IRQ.
* @param num IRQ number. */
void (*disable)(unsigned num);
} irq_controller_t;

/** IRQ top-half handler function type.
* @param num IRQ number.
* @param data Data pointer associated with the handler.
* @return IRQ status code. */
* @param num IRQ number.
* @param data Data pointer associated with the handler.
* @return IRQ status code. */
typedef irq_status_t (*irq_top_t)(unsigned num, void *data);

/** IRQ bottom-half handler function type.
* @param num IRQ number.
* @param data Data pointer associated with the handler. */
* @param num IRQ number.
* @param data Data pointer associated with the handler. */
typedef void (*irq_bottom_t)(unsigned num, void *data);

extern status_t irq_register(unsigned num, irq_top_t top, irq_bottom_t bottom, void *data);
@@ -16,14 +16,14 @@

/**
* @file
* @brief Deferred procedure call functions.
* @brief Deferred procedure call functions.
*/

#ifndef __DPC_H
#define __DPC_H

/** Handler function for a DPC.
* @param arg Argument passed to dpc_run(). */
* @param arg Argument passed to dpc_run(). */
typedef void (*dpc_function_t)(void *arg);

extern void dpc_request(dpc_function_t func, void *arg);
@@ -16,7 +16,7 @@

/**
* @file
* @brief ELF loader.
* @brief ELF loader.
*/

#ifndef __KERNEL_ELF_H
@@ -34,45 +34,43 @@ struct vm_aspace;

/** ELF image information structure. */
typedef struct elf_image {
list_t header; /**< Link to loaded image list. */

image_id_t id; /**< ID of the image. */
char *name; /**< Name of the image. */
ptr_t load_base; /**< Base address of image. */
size_t load_size; /**< Total size of image. */
elf_ehdr_t *ehdr; /**< ELF executable header. */
elf_phdr_t *phdrs; /**< Program headers (only valid during loading). */
elf_shdr_t *shdrs; /**< ELF section headers. */

/** Symbol/string tables.
* @warning For user images, these are user pointers. */
void *symtab; /**< Symbol table. */
uint32_t sym_size; /**< Size of symbol table. */
uint32_t sym_entsize; /**< Size of a single symbol table entry. */
void *strtab; /**< String table. */
list_t header; /**< Link to loaded image list. */

image_id_t id; /**< ID of the image. */
char *name; /**< Name of the image. */
ptr_t load_base; /**< Base address of image. */
size_t load_size; /**< Total size of image. */
elf_ehdr_t *ehdr; /**< ELF executable header. */
elf_phdr_t *phdrs; /**< Program headers (only valid during loading). */
elf_shdr_t *shdrs; /**< ELF section headers. */

/** Symbol/string tables.
* @warning For user images, these are user pointers. */
void *symtab; /**< Symbol table. */
uint32_t sym_size; /**< Size of symbol table. */
uint32_t sym_entsize; /**< Size of a single symbol table entry. */
void *strtab; /**< String table. */
} elf_image_t;

extern status_t elf_binary_reserve(object_handle_t *handle, struct vm_aspace *as);
extern status_t elf_binary_load(object_handle_t *handle, const char *path,
struct vm_aspace *as, ptr_t dest, elf_image_t **imagep);
extern status_t elf_binary_load(
object_handle_t *handle, const char *path, struct vm_aspace *as, ptr_t dest,
elf_image_t **_image);
extern ptr_t elf_binary_finish(elf_image_t *image);

extern status_t arch_elf_module_relocate_rel(elf_image_t *image, elf_rel_t *rel,
elf_shdr_t *target);
extern status_t arch_elf_module_relocate_rela(elf_image_t *image, elf_rela_t *rela,
elf_shdr_t *target);
extern status_t arch_elf_module_relocate_rel(elf_image_t *image, elf_rel_t *rel, elf_shdr_t *target);
extern status_t arch_elf_module_relocate_rela(elf_image_t *image, elf_rela_t *rela, elf_shdr_t *target);

extern status_t elf_module_resolve(elf_image_t *image, size_t num, elf_addr_t *valp);
extern status_t elf_module_resolve(elf_image_t *image, size_t num, elf_addr_t *_val);

extern status_t elf_module_load(object_handle_t *handle, const char *path,
elf_image_t *image);
extern status_t elf_module_load(object_handle_t *handle, const char *path, elf_image_t *image);
extern status_t elf_module_finish(elf_image_t *image);
extern void elf_module_destroy(elf_image_t *image);

extern bool elf_symbol_from_addr(elf_image_t *image, ptr_t addr, struct symbol *symbol,
size_t *offp);
extern bool elf_symbol_lookup(elf_image_t *image, const char *name, bool global,
bool exported, struct symbol *symbol);
extern bool elf_symbol_from_addr(elf_image_t *image, ptr_t addr, struct symbol *symbol, size_t *_off);
extern bool elf_symbol_lookup(
elf_image_t *image, const char *name, bool global, bool exported,
struct symbol *symbol);

extern void elf_init(elf_image_t *image);

@@ -16,7 +16,7 @@

/**
* @file
* @brief Endian conversion functions.
* @brief Endian conversion functions.
*/

#ifndef __ENDIAN_H
@@ -25,74 +25,74 @@
#include <types.h>

/** Swap byte order in a 16-bit value.
* @param val Value to swap order of.
* @return Converted value. */
* @param val Value to swap order of.
* @return Converted value. */
static inline uint16_t byte_order_swap16(uint16_t val) {
uint16_t out = 0;
uint16_t out = 0;

out |= (val & 0x00ff) << 8;
out |= (val & 0xff00) >> 8;
return out;
out |= (val & 0x00ff) << 8;
out |= (val & 0xff00) >> 8;
return out;
}

/** Swap byte order in a 32-bit value.
* @param val Value to swap order of.
* @return Converted value. */
* @param val Value to swap order of.
* @return Converted value. */
static inline uint32_t byte_order_swap32(uint32_t val) {
uint32_t out = 0;
uint32_t out = 0;

out |= (val & 0x000000ff) << 24;
out |= (val & 0x0000ff00) << 8;
out |= (val & 0x00ff0000) >> 8;
out |= (val & 0xff000000) >> 24;
return out;
out |= (val & 0x000000ff) << 24;
out |= (val & 0x0000ff00) << 8;
out |= (val & 0x00ff0000) >> 8;
out |= (val & 0xff000000) >> 24;
return out;
}

/** Swap byte order in a 64-bit value.
* @param val Value to swap order of.
* @return Converted value. */
* @param val Value to swap order of.
* @return Converted value. */
static inline uint64_t byte_order_swap64(uint64_t val) {
uint64_t out = 0;
uint64_t out = 0;

out |= (val & 0x00000000000000ff) << 56;
out |= (val & 0x000000000000ff00) << 40;
out |= (val & 0x0000000000ff0000) << 24;
out |= (val & 0x00000000ff000000) << 8;
out |= (val & 0x000000ff00000000) >> 8;
out |= (val & 0x0000ff0000000000) >> 24;
out |= (val & 0x00ff000000000000) >> 40;
out |= (val & 0xff00000000000000) >> 56;
return out;
out |= (val & 0x00000000000000ff) << 56;
out |= (val & 0x000000000000ff00) << 40;
out |= (val & 0x0000000000ff0000) << 24;
out |= (val & 0x00000000ff000000) << 8;
out |= (val & 0x000000ff00000000) >> 8;
out |= (val & 0x0000ff0000000000) >> 24;
out |= (val & 0x00ff000000000000) >> 40;
out |= (val & 0xff00000000000000) >> 56;
return out;
}

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define be16_to_cpu(v) byte_order_swap16((v))
# define be32_to_cpu(v) byte_order_swap32((v))
# define be64_to_cpu(v) byte_order_swap64((v))
# define le16_to_cpu(v) (v)
# define le32_to_cpu(v) (v)
# define le64_to_cpu(v) (v)
# define cpu_to_be16(v) byte_order_swap16((v))
# define cpu_to_be32(v) byte_order_swap32((v))
# define cpu_to_be64(v) byte_order_swap64((v))
# define cpu_to_le16(v) (v)
# define cpu_to_le32(v) (v)
# define cpu_to_le64(v) (v)
# define be16_to_cpu(v) byte_order_swap16((v))
# define be32_to_cpu(v) byte_order_swap32((v))
# define be64_to_cpu(v) byte_order_swap64((v))
# define le16_to_cpu(v) (v)
# define le32_to_cpu(v) (v)
# define le64_to_cpu(v) (v)
# define cpu_to_be16(v) byte_order_swap16((v))
# define cpu_to_be32(v) byte_order_swap32((v))
# define cpu_to_be64(v) byte_order_swap64((v))
# define cpu_to_le16(v) (v)
# define cpu_to_le32(v) (v)
# define cpu_to_le64(v) (v)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define be16_to_cpu(v) (v)
# define be32_to_cpu(v) (v)
# define be64_to_cpu(v) (v)
# define le16_to_cpu(v) byte_order_swap16((v))
# define le32_to_cpu(v) byte_order_swap32((v))
# define le64_to_cpu(v) byte_order_swap64((v))
# define cpu_to_be16(v) (v)
# define cpu_to_be32(v) (v)
# define cpu_to_be64(v) (v)
# define cpu_to_le16(v) byte_order_swap16((v))
# define cpu_to_le32(v) byte_order_swap32((v))
# define cpu_to_le64(v) byte_order_swap64((v))
# define be16_to_cpu(v) (v)
# define be32_to_cpu(v) (v)
# define be64_to_cpu(v) (v)
# define le16_to_cpu(v) byte_order_swap16((v))
# define le32_to_cpu(v) byte_order_swap32((v))
# define le64_to_cpu(v) byte_order_swap64((v))
# define cpu_to_be16(v) (v)
# define cpu_to_be32(v) (v)
# define cpu_to_be64(v) (v)
# define cpu_to_le16(v) byte_order_swap16((v))
# define cpu_to_le32(v) byte_order_swap32((v))
# define cpu_to_le64(v) byte_order_swap64((v))
#else
# error "__BYTE_ORDER__ is not defined"
# error "__BYTE_ORDER__ is not defined"
#endif

#endif /* __ENDIAN_H */
@@ -16,7 +16,7 @@

/**
* @file
* @brief I/O context functions.
* @brief I/O context functions.
*/

#ifndef __IO_CONTEXT_H
@@ -29,9 +29,9 @@ struct process;

/** Structure containing an I/O context. */
typedef struct io_context {
rwlock_t lock; /**< Lock to protect context. */
struct fs_dentry *root_dir; /**< Root directory. */
struct fs_dentry *curr_dir; /**< Current working directory. */
rwlock_t lock; /**< Lock to protect context. */
struct fs_dentry *root_dir; /**< Root directory. */
struct fs_dentry *curr_dir; /**< Current working directory. */
} io_context_t;

extern void io_process_init(struct process *process, struct process *parent);
@@ -16,7 +16,7 @@

/**
* @file
* @brief Device manager.
* @brief Device manager.
*/

#ifndef __IO_DEVICE_H
@@ -35,169 +35,172 @@ struct device;

/** Structure containing device operations. */
typedef struct device_ops {
file_type_t type; /**< Type of the device. */

/** Clean up all data associated with a device.
* @param device Device to destroy. */
void (*destroy)(struct device *device);

/** Handler for open calls.
* @note Called with device lock held.
* @param device Device being opened.
* @param flags Flags being opened with.
* @param datap Where to store handle-specific data pointer.
* @return Status code describing result of operation. */
status_t (*open)(struct device *device, uint32_t flags, void **datap);

/** Handler for close calls.
* @note Called with device lock held.
* @param device Device being released.
* @param handle File handle structure. */
void (*close)(struct device *device, file_handle_t *handle);

/** Signal that a device event is being waited for.
* @note If the event being waited for has occurred
* already, this function should call the callback
* function and return success.
* @param device Device to wait on.
* @param handle File handle structure.
* @param event Event that is being waited for.
* @return Status code describing result of the operation. */
status_t (*wait)(struct device *device, file_handle_t *handle, object_event_t *event);

/** Stop waiting for a device event.
* @param device Device being waited on.
* @param handle File handle structure.
* @param event Event that is being waited for. */
void (*unwait)(struct device *device, file_handle_t *handle, object_event_t *event);

/** Perform I/O on a device.
* @param device Device to perform I/O on.
* @param handle File handle structure.
* @param request I/O request.
* @return Status code describing result of the operation. */
status_t (*io)(struct device *device, file_handle_t *handle, struct io_request *request);

/** Map a device into memory.
* @note See object_type_t::map() for more details on the
* behaviour of this function.
* @param device Device to map.
* @param handle File handle structure.
* @param region Region being mapped.
* @return Status code describing result of the operation. */
status_t (*map)(struct device *device, struct file_handle *handle, struct vm_region *region);

/** Handler for device-specific requests.
* @param device Device request is being made on.
* @param handle File handle structure.
* @param request Request number.
* @param in Input buffer.
* @param in_size Input buffer size.
* @param outp Where to store pointer to output buffer.
* @param out_sizep Where to store output buffer size.
* @return Status code describing result of operation. */
status_t (*request)(struct device *device, file_handle_t *handle,
unsigned request, const void *in, size_t in_size, void **outp,
size_t *out_sizep);
file_type_t type; /**< Type of the device. */

/** Clean up all data associated with a device.
* @param device Device to destroy. */
void (*destroy)(struct device *device);

/** Handler for open calls.
* @note Called with device lock held.
* @param device Device being opened.
* @param flags Flags being opened with.
* @param _data Where to store handle-specific data pointer.
* @return Status code describing result of operation. */
status_t (*open)(struct device *device, uint32_t flags, void **_data);

/** Handler for close calls.
* @note Called with device lock held.
* @param device Device being released.
* @param handle File handle structure. */
void (*close)(struct device *device, file_handle_t *handle);

/** Signal that a device event is being waited for.
* @note If the event being waited for has occurred
* already, this function should call the callback
* function and return success.
* @param device Device to wait on.
* @param handle File handle structure.
* @param event Event that is being waited for.
* @return Status code describing result of the operation. */
status_t (*wait)(struct device *device, file_handle_t *handle, object_event_t *event);

/** Stop waiting for a device event.
* @param device Device being waited on.
* @param handle File handle structure.
* @param event Event that is being waited for. */
void (*unwait)(struct device *device, file_handle_t *handle, object_event_t *event);

/** Perform I/O on a device.
* @param device Device to perform I/O on.
* @param handle File handle structure.
* @param request I/O request.
* @return Status code describing result of the operation. */
status_t (*io)(struct device *device, file_handle_t *handle, struct io_request *request);

/** Map a device into memory.
* @note See object_type_t::map() for more details on the
* behaviour of this function.
* @param device Device to map.
* @param handle File handle structure.
* @param region Region being mapped.
* @return Status code describing result of the operation. */
status_t (*map)(struct device *device, struct file_handle *handle, struct vm_region *region);

/** Handler for device-specific requests.
* @param device Device request is being made on.
* @param handle File handle structure.
* @param request Request number.
* @param in Input buffer.
* @param in_size Input buffer size.
* @param _out Where to store pointer to output buffer.
* @param _out_size Where to store output buffer size.
* @return Status code describing result of operation. */
status_t (*request)(
struct device *device, file_handle_t *handle, unsigned request,
const void *in, size_t in_size, void **_out, size_t *_out_size);
} device_ops_t;

/** Device attribute structure. */
typedef struct device_attr {
const char *name; /**< Attribute name. */

/** Attribute type. */
enum {
DEVICE_ATTR_UINT8, /**< 8-bit unsigned integer value. */
DEVICE_ATTR_UINT16, /**< 16-bit unsigned integer value. */
DEVICE_ATTR_UINT32, /**< 32-bit unsigned integer value. */
DEVICE_ATTR_UINT64, /**< 64-bit unsigned integer value. */
DEVICE_ATTR_STRING, /**< String value. */
} type;

/** Attribute value. */
union {
uint8_t uint8; /**< DEVICE_ATTR_UINT8. */
uint16_t uint16; /**< DEVICE_ATTR_UINT16. */
uint32_t uint32; /**< DEVICE_ATTR_UINT32. */
uint64_t uint64; /**< DEVICE_ATTR_UINT64. */
const char *string; /**< DEVICE_ATTR_STRING. */
} value;
const char *name; /**< Attribute name. */

/** Attribute type. */
enum {
DEVICE_ATTR_UINT8, /**< 8-bit unsigned integer value. */
DEVICE_ATTR_UINT16, /**< 16-bit unsigned integer value. */
DEVICE_ATTR_UINT32, /**< 32-bit unsigned integer value. */
DEVICE_ATTR_UINT64, /**< 64-bit unsigned integer value. */
DEVICE_ATTR_STRING, /**< String value. */
} type;

/** Attribute value. */
union {
uint8_t uint8; /**< DEVICE_ATTR_UINT8. */
uint16_t uint16; /**< DEVICE_ATTR_UINT16. */
uint32_t uint32; /**< DEVICE_ATTR_UINT32. */
uint64_t uint64; /**< DEVICE_ATTR_UINT64. */
const char *string; /**< DEVICE_ATTR_STRING. */
} value;
} device_attr_t;

/** Structure describing an entry in the device tree. */
typedef struct device {
file_t file; /**< File header. */

char *name; /**< Name of the device. */
mutex_t lock; /**< Lock to protect structure. */
refcount_t count; /**< Number of users of the device. */

struct device *parent; /**< Parent tree entry. */
radix_tree_t children; /**< Child devices. */
struct device *dest; /**< Destination device if this is an alias. */
union {
list_t aliases; /**< Aliases for this device. */
list_t dest_link; /**< Link to destination's aliases list. */
};

device_ops_t *ops; /**< Operations structure for the device. */
void *data; /**< Data used by the device's creator. */
device_attr_t *attrs; /**< Array of attribute structures. */
size_t attr_count; /**< Number of attributes. */
file_t file; /**< File header. */

char *name; /**< Name of the device. */
mutex_t lock; /**< Lock to protect structure. */
refcount_t count; /**< Number of users of the device. */

struct device *parent; /**< Parent tree entry. */
radix_tree_t children; /**< Child devices. */
struct device *dest; /**< Destination device if this is an alias. */
union {
list_t aliases; /**< Aliases for this device. */
list_t dest_link; /**< Link to destination's aliases list. */
};

device_ops_t *ops; /**< Operations structure for the device. */
void *data; /**< Data used by the device's creator. */
device_attr_t *attrs; /**< Array of attribute structures. */
size_t attr_count; /**< Number of attributes. */
} device_t;

/** Return values from device_iterate_t. */
enum {
DEVICE_ITERATE_END, /**< Finish iteration. */
DEVICE_ITERATE_DESCEND, /**< Descend into children. */
DEVICE_ITERATE_RETURN, /**< Return to parent. */
DEVICE_ITERATE_END, /**< Finish iteration. */
DEVICE_ITERATE_DESCEND, /**< Descend into children. */
DEVICE_ITERATE_RETURN, /**< Return to parent. */
};

/** Device tree iteration callback.
* @param device Device currently on.
* @param data Iteration data.
* @return Action to perform (DEVICE_ITERATE_*). */
* @param device Device currently on.
* @param data Iteration data.
* @return Action to perform (DEVICE_ITERATE_*). */
typedef int (*device_iterate_t)(device_t *device, void *data);

/** Start of class-specific event/request numbers. */
#define DEVICE_CLASS_EVENT_START 32
#define DEVICE_CLASS_REQUEST_START 32
#define DEVICE_CLASS_EVENT_START 32
#define DEVICE_CLASS_REQUEST_START 32

/** Start of device-specific event/request numbers. */
#define DEVICE_CUSTOM_EVENT_START 1024
#define DEVICE_CUSTOM_REQUEST_START 1024
#define DEVICE_CUSTOM_EVENT_START 1024
#define DEVICE_CUSTOM_REQUEST_START 1024

extern device_t *device_tree_root;
extern device_t *device_bus_dir;

/** Get the name of a device from a handle.
* @param handle Handle to get name from.
* @return Name of the device. */
* @param handle Handle to get name from.
* @return Name of the device. */
static inline const char *device_name(object_handle_t *_handle) {
file_handle_t *handle = _handle->private;
device_t *device = (device_t *)handle->file;
file_handle_t *handle = _handle->private;
device_t *device = (device_t *)handle->file;

return device->name;
return device->name;
}

extern status_t device_create(const char *name, device_t *parent,
device_ops_t *ops, void *data, device_attr_t *attrs, size_t count,
device_t **devicep);
extern status_t device_alias(const char *name, device_t *parent, device_t *dest,
device_t **devicep);
extern status_t device_create(
const char *name, device_t *parent, device_ops_t *ops, void *data,
device_attr_t *attrs, size_t count, device_t **_device);
extern status_t device_alias(
const char *name, device_t *parent, device_t *dest, device_t **_device);
extern status_t device_destroy(device_t *device);

extern void device_iterate(device_t *start, device_iterate_t func, void *data);
extern device_attr_t *device_attr(device_t *device, const char *name, int type);
extern char *device_path(device_t *device);

extern status_t device_get(device_t *device, uint32_t rights, uint32_t flags,
object_handle_t **handlep);
extern status_t device_open(const char *path, uint32_t rights, uint32_t flags,
object_handle_t **handlep);
extern status_t device_get(
device_t *device, uint32_t rights, uint32_t flags,
object_handle_t **_handle);
extern status_t device_open(
const char *path, uint32_t rights, uint32_t flags,
object_handle_t **_handle);

extern status_t device_request(object_handle_t *handle, unsigned request,
const void *in, size_t in_size, void **outp, size_t *out_sizep);
extern status_t device_request(
object_handle_t *handle, unsigned request, const void *in, size_t in_size,
void **_out, size_t *_out_size);

extern void device_init(void);