Do not re-init cpu after SPL and few mmu updates
zeusk committed Mar 16, 2012
1 parent 75a7d2d commit e03e71f
Showing 10 changed files with 133 additions and 211 deletions.
41 changes: 2 additions & 39 deletions arch/arm/arch.c
@@ -36,9 +36,6 @@ static void set_vector_base(addr_t addr)

void arch_early_init(void)
{
/* turn off the cache */
arch_disable_cache(UCACHE);

/* set the vector base to our exception vectors so we dont need to double map at 0 */
#if ARM_CPU_CORTEX_A8
set_vector_base(MEMBASE);
@@ -50,9 +47,6 @@ void arch_early_init(void)
platform_init_mmu_mappings();
#endif

/* turn the cache back on */
arch_enable_cache(UCACHE);

[zeusk (author) commented, Mar 16, 2012] We don't use icache or dcache, so let it remain disabled. (See the sketch after this file's diff.)

#if ARM_WITH_NEON
/* enable cp10 and cp11 */
uint32_t val;
@@ -61,43 +55,12 @@ void arch_early_init(void)
__asm__ volatile("mcr p15, 0, %0, c1, c0, 2" :: "r" (val));

/* set enable bit in fpexc */
val = (1<<30);
__asm__ volatile("mrc p10, 7, %0, c8, c0, 0" : "=r" (val));
val |= (1<<30);
__asm__ volatile("mcr p10, 7, %0, c8, c0, 0" :: "r" (val));
#endif
#if ARM_CPU_CORTEX_A8

[zeusk (author) commented, Mar 16, 2012] Neither do we use performance counters.

/* enable the cycle count register */
uint32_t en;
__asm__ volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (en));
en &= ~(1<<3); /* cycle count every cycle */
en |= 1; /* enable all performance counters */
__asm__ volatile("mcr p15, 0, %0, c9, c12, 0" :: "r" (en));

/* enable cycle counter */
en = (1<<31);
__asm__ volatile("mcr p15, 0, %0, c9, c12, 1" :: "r" (en));
#endif
}
void arch_quiesce(void)
{
#if ARM_CPU_CORTEX_A8
/* disable the cycle count and performance counters */
uint32_t en;
__asm__ volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (en));
en &= ~1; /* disable all performance counters */
__asm__ volatile("mcr p15, 0, %0, c9, c12, 0" :: "r" (en));

/* disable cycle counter */
en = 0;
__asm__ volatile("mcr p15, 0, %0, c9, c12, 1" :: "r" (en));
#endif
#if ARM_CPU_ARM1136
/* disable the cycle count and performance counters */
uint32_t en;
__asm__ volatile("mrc p15, 0, %0, c15, c12, 0" : "=r" (en));
en &= ~1; /* disable all performance counters */
__asm__ volatile("mcr p15, 0, %0, c15, c12, 0" :: "r" (en));
#endif
}
void arch_init(void)
{
}
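
The two inline comments above give the rationale: on these targets lk inherits the CPU state set up by the SPL and does not manage the caches or performance counters itself. As a rough illustration only -- assuming the ARMv7/Cortex-A8 SCTLR layout, and not taken from this commit -- re-enabling the unified cache from C would amount to setting the same control-register bits the deleted arch_enable_cache path touched:

    #include <stdint.h>

    /* Hypothetical sketch (not code from this commit): the SCTLR bits that the
     * removed arch_enable_cache(UCACHE) call would have set.  lk now leaves the
     * cache state exactly as the SPL configured it.  The removed assembly also
     * invalidated the caches before enabling them; that step is omitted here. */
    static inline void sketch_enable_ucache(void)
    {
        uint32_t sctlr;
        __asm__ volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr)); /* read SCTLR */
        sctlr |= (1u << 2);   /* C bit: data/unified cache enable */
        sctlr |= (1u << 12);  /* I bit: instruction cache enable */
        __asm__ volatile("mcr p15, 0, %0, c1, c0, 0" :: "r" (sctlr)); /* write SCTLR */
    }
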
12 changes: 0 additions & 12 deletions arch/arm/asm.S
@@ -21,8 +21,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <asm.h>


/* context switch frame is as follows:
* ulr
* usp
@@ -43,10 +41,8 @@ FUNCTION(arm_context_switch)
sub r3, sp, #(11*4) /* can't use sp in user mode stm */
mov r12, lr
stmia r3, { r4-r11, r12, r13, r14 }^

/* save old sp */
str r3, [r0]

/* clear any exclusive locks that the old thread holds */
#if ARM_ISA_ARMV7
/* can clear it directly */
@@ -56,18 +52,15 @@
ldr r0, =strex_spot
strex r3, r2, [r0]
#endif

/* load new regs */
ldmia r1, { r4-r11, r12, r13, r14 }^
mov lr, r12 /* restore lr */
add sp, r1, #(11*4) /* restore sp */
bx lr

.ltorg

FUNCTION(arm_save_mode_regs)
mrs r1, cpsr

#if ARM_ISA_ARMv6
cps #0x11 /* fiq */
str r13, [r0], #4
@@ -91,13 +84,8 @@ FUNCTION(arm_save_mode_regs)
// XXX implement
b .
#endif

msr cpsr_c, r1

bx lr

.data
strex_spot:
.word 0


84 changes: 24 additions & 60 deletions arch/arm/cache-ops.S
@@ -172,52 +172,6 @@ FUNCTION(arch_disable_cache)
msr cpsr, r12
ldmfd sp!, {r4-r11, pc}

/* void arch_enable_cache(uint flags) */
FUNCTION(arch_enable_cache)
stmfd sp!, {r4-r11, lr}

mov r7, r0 // save flags

mrs r12, cpsr // save the old interrupt state
.word 0xf10c01c0 /* cpsid iaf */ // interrupts disabled

.Ldcache_enable:
tst r7, #DCACHE
beq .Licache_enable
mrc p15, 0, r0, c1, c0, 0 // cr1
tst r0, #(1<<2) // is the dcache already enabled?
bne .Licache_enable

// invalidate L1 and L2
// NOTE: trashes a bunch of registers, can't be spilling stuff to the stack
bl invalidate_cache_v7

#if ARM_WITH_L2
// enable the L2, if present
mrc p15, 0, r0, c1, c0, 1 // aux cr1
orr r0, #(1<<1)
mcr p15, 0, r0, c1, c0, 1 // enable L2 dcache
#endif

mrc p15, 0, r0, c1, c0, 0 // cr1
orr r0, #(1<<2)
mcr p15, 0, r0, c1, c0, 0 // enable dcache

.Licache_enable:
tst r7, #ICACHE
beq .Ldone_enable

mov r0, #0
mcr p15, 0, r0, c7, c5, 0 // invalidate icache to PoU

mrc p15, 0, r0, c1, c0, 0 // cr1
orr r0, #(1<<12)
mcr p15, 0, r0, c1, c0, 0 // enable icache

.Ldone_enable:
msr cpsr, r12
ldmfd sp!, {r4-r11, pc}

// flush & invalidate cache routine, trashes r0-r6, r9-r11
flush_invalidate_cache_v7:
/* from ARMv7 manual, B2-17 */
@@ -318,21 +272,17 @@ invalidate_cache_v7:
/* shared cache flush routines */

/* void arch_flush_cache_range(addr_t start, size_t len); */
FUNCTION(arch_clean_cache_range)
#if ARM_WITH_CP15
add r2, r0, r1 // calculate the end address
bic r0, #(CACHE_LINE-1) // align the start with a cache line
0:
mcr p15, 0, r0, c7, c10, 1 // clean cache to PoC by MVA
add r0, r0, #CACHE_LINE
cmp r0, r2
blo 0b

mov r0, #0
mcr p15, 0, r0, c7, c10, 4 // data sync barrier
#endif
bx lr
FUNCTION(arch_clean_cache_range)
0:
mcr p15, 0, r0, c7, c10, 1 // clean cache to PoC by MVA
add r0, r0, #CACHE_LINE
subs r1, r1, #CACHE_LINE
bhs 0b

mov r0, #0
mcr p15, 0, r0, c7, c10, 4 // data sync barrier (formerly drain write buffer)

bx lr

/* void arch_flush_invalidate_cache_range(addr_t start, size_t len); */
FUNCTION(arch_clean_invalidate_cache_range)
@@ -346,6 +296,17 @@ FUNCTION(arch_clean_invalidate_cache_range)
mcr p15, 0, r0, c7, c10, 4 // data sync barrier (formerly drain write buffer)

bx lr

/* void arch_sync_cache_range(addr_t start, size_t len); */
FUNCTION(arch_sync_cache_range)
push { r14 }
bl arch_clean_cache_range

mov r0, #0
mcr p15, 0, r0, c7, c5, 0 // invalidate icache to PoU

pop { pc }

#else
#error unhandled cpu
#endif
@@ -366,5 +327,8 @@ FUNCTION(arch_clean_cache_range)
FUNCTION(arch_clean_invalidate_cache_range)
bx lr

FUNCTION(arch_sync_cache_range)
bx lr

#endif // ARM_WITH_CACHE
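
For reference, the reworked arch_clean_cache_range() now counts the length down a cache line at a time instead of comparing against an end address, and the new arch_sync_cache_range() cleans the data cache over a range and then invalidates the instruction cache -- the usual sequence after writing code into memory (for example, after loading an image). A rough C rendering of the new routines (a sketch assuming a cache-line-aligned start and the 64-byte Cortex-A8 line size; the helper names are made up):

    #include <stddef.h>
    #include <stdint.h>

    #define CACHE_LINE 64           /* Cortex-A8, per arch/arm/include/arch/defines.h */
    typedef uintptr_t addr_t;

    /* Sketch of arch_clean_cache_range(): clean each line by MVA to PoC, then DSB. */
    static void sketch_clean_cache_range(addr_t start, size_t len)
    {
        for (size_t off = 0; off < len; off += CACHE_LINE)
            __asm__ volatile("mcr p15, 0, %0, c7, c10, 1" :: "r" (start + off)); /* DCCMVAC */
        __asm__ volatile("mcr p15, 0, %0, c7, c10, 4" :: "r" (0));               /* data sync barrier */
    }

    /* Sketch of arch_sync_cache_range(): clean dcache for the range, then
     * invalidate the whole icache so the new instructions are refetched. */
    static void sketch_sync_cache_range(addr_t start, size_t len)
    {
        sketch_clean_cache_range(start, len);
        __asm__ volatile("mcr p15, 0, %0, c7, c5, 0" :: "r" (0));                /* invalidate icache to PoU */
    }
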

1 change: 0 additions & 1 deletion arch/arm/compile.mk
@@ -30,4 +30,3 @@ $(BUILDDIR)/%.o: %.S $(SRCDEPS)
@$(MKDIR)
@echo compiling $<
$(NOECHO)$(CC) $(CFLAGS) $(ASMFLAGS) $(INCLUDES) -c $< -MD -MT $@ -MF $(@:%o=%d) -o $@

75 changes: 28 additions & 47 deletions arch/arm/crt0.S
@@ -24,7 +24,7 @@
#define DSB .byte 0x4f, 0xf0, 0x7f, 0xf5
#define ISB .byte 0x6f, 0xf0, 0x7f, 0xf5

.text
.section ".text.boot"
.globl _start
_start:
b reset
@@ -35,66 +35,47 @@ _start:
b arm_reserved
b arm_irq
b arm_fiq

#ifdef WSPL_VADDR
//if LK is loaded by wince spl add romhdr

.org 0x40
.word 0x43454345
.word (romhdr-_start)+WSPL_VADDR // virtual address of romhdr
//.word romhdr+0x96C00000 // virtual address of romhdr
.word romhdr-_start // file address of romhdr

.word (romhdr-_start)+WSPL_VADDR
.word romhdr-_start
.org 0x00000900
romhdr:
.word 0x2000000 // dllfirst
.word 0x2000000 // dlllast
.word WSPL_VADDR // physfirst
.word WSPL_VADDR+(_end-_start) // physlast
.word 0 // nummods (no TOCentry after ROMHDR)
.word WSPL_VADDR+(_end-_start) // ulRAMStart
.word WSPL_VADDR+(_end-_start) // ulRAMFree
.word WSPL_VADDR+MEMSIZE // ulRAMEnd
.word 0 // ulCopyEntries
.word 0 // ulCopyOffset
.word 0 // ulProfileLen
.word 0 // ulProfileOffset
.word 0 // numfiles
.word 0 // ulKernelFlags
.word 0x80808080 // ulFSRamPercent
.word 0 // ulDrivglobStart
.word 0 // ulDrivglobLen
.hword 0x1C2 // usCPUType
.hword 0x2 // usMiscFlags
.word 0 // pExtensions
.word 0 // ulTrackingStart
.word 0 // ulTrackingLen
.org 0x00001000
#endif

.word 0x2000000
.word 0x2000000
.word WSPL_VADDR
.word WSPL_VADDR+(_end-_start)
.word 0
.word WSPL_VADDR+(_end-_start)
.word WSPL_VADDR+(_end-_start)
.word WSPL_VADDR+MEMSIZE
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0x80808080
.word 0
.word 0
.hword 0x1C2
.hword 0x2
.word 0
.word 0
.word 0
.org 0x00001000

reset:
/* do some cpu setup */
#if ARM_WITH_CP15
/* new thumb behavior, low exception vectors, disable cache & mmu, enable align fault */
mrc p15, 0, r0, c1, c0, 0
/* XXX this is currently for arm926, revisit with armv6 cores */
/* new thumb behavior, low exception vectors, i/d cache disable, mmu disabled */
bic r0, r0, #(1<<15| 1<<13 | 1<<12)
bic r0, r0, #(1<<2 | 1<<0)
/* enable alignment faults */
orr r0, r0, #(1<<1)
mcr p15, 0, r0, c1, c0, 0
#endif

#if WITH_CPU_EARLY_INIT
/* call platform/arch/etc specific init code */
bl __cpu_early_init

/* declare return address as global to avoid using stack */
.globl _cpu_early_init_complete
_cpu_early_init_complete:

#endif

#if (!ENABLE_NANDWRITE)
#if WITH_CPU_WARM_BOOT
ldr r0, warm_boot_tag
28 changes: 24 additions & 4 deletions arch/arm/include/arch/arm/mmu.h
@@ -31,13 +31,33 @@ extern "C" {

void arm_mmu_init(void);

#define MMU_FLAG_CACHED 0x1
#define MMU_FLAG_BUFFERED 0x2
#define MMU_FLAG_READWRITE 0x4
#if defined(ARM_ISA_ARMV6) | defined(ARM_ISA_ARMV7)

/* C, B and TEX[2:0] encodings without TEX remap */
/* TEX | CB */
#define MMU_MEMORY_TYPE_STRONGLY_ORDERED ((0x0 << 12) | (0x0 << 2))
#define MMU_MEMORY_TYPE_DEVICE_SHARED ((0x0 << 12) | (0x1 << 2))
#define MMU_MEMORY_TYPE_DEVICE_NON_SHARED ((0x2 << 12) | (0x0 << 2))
#define MMU_MEMORY_TYPE_NORMAL ((0x1 << 12) | (0x0 << 2))
#define MMU_MEMORY_TYPE_NORMAL_WRITE_THROUGH ((0x0 << 12) | (0x2 << 2))
#define MMU_MEMORY_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE ((0x0 << 12) | (0x3 << 2))
#define MMU_MEMORY_TYPE_NORMAL_WRITE_BACK_ALLOCATE ((0x1 << 12) | (0x3 << 2))

#define MMU_MEMORY_AP_NO_ACCESS (0x0 << 10)
#define MMU_MEMORY_AP_READ_ONLY (0x7 << 10)
#define MMU_MEMORY_AP_READ_WRITE (0x3 << 10)

#define MMU_MEMORY_XN (0x1 << 4)

#else

#error "MMU implementation needs to be updated for this ARM architecture"

#endif

void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags);
void arm_mmu_unmap_section(addr_t vaddr);


#if defined(__cplusplus)
}
#endif
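
The header now exports the raw first-level section attribute encodings (TEX/C/B memory types, AP permission fields, and XN) in place of the old MMU_FLAG_* values, so platform code can state memory attributes precisely. A sketch of how a platform's mapping code might combine them -- the function name and addresses below are placeholders, not from this commit:

    #include <arch/arm/mmu.h>

    /* Hypothetical fragment of a platform_init_mmu_mappings() implementation. */
    void example_mmu_mappings(void)
    {
        /* 1 MB RAM section: normal write-back write-allocate memory, read/write */
        arm_mmu_map_section(0x20000000, 0x20000000,
                MMU_MEMORY_TYPE_NORMAL_WRITE_BACK_ALLOCATE | MMU_MEMORY_AP_READ_WRITE);

        /* 1 MB peripheral section: strongly ordered, read/write, execute-never */
        arm_mmu_map_section(0xA0000000, 0xA0000000,
                MMU_MEMORY_TYPE_STRONGLY_ORDERED | MMU_MEMORY_AP_READ_WRITE | MMU_MEMORY_XN);
    }
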
13 changes: 3 additions & 10 deletions arch/arm/include/arch/defines.h
@@ -26,17 +26,10 @@
/* arm specific stuff */
#define PAGE_SIZE 4096

#if ARM_CPU_ARM7
/* irrelevant, no consistent cache */
#define CACHE_LINE 32
#elif ARM_CPU_ARM926
#define CACHE_LINE 32
#elif ARM_CPU_ARM1136
#define CACHE_LINE 32
#elif ARM_CPU_CORTEX_A8
#define CACHE_LINE 64
#if defined(ARM_CPU_CORTEX_A8)
#define CACHE_LINE 64
#else
#error unknown cpu
#error unknown cpu
#endif

#endif
(diffs for the remaining 3 changed files not loaded)
