-
-
Notifications
You must be signed in to change notification settings - Fork 335
/
arch_mmu.cpp
728 lines (572 loc) · 17.8 KB
/
arch_mmu.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
/*
* Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
* Based on code written by Travis Geiselbrecht for NewOS.
*
* Distributed under the terms of the MIT License.
*/
#include "arch_mmu.h"
#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <arm_mmu.h>
#include <kernel.h>
#include <board_config.h>
#include <OS.h>
#include <string.h>
//#define TRACE_MMU
#ifdef TRACE_MMU
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define TRACE_MEMORY_MAP
// Define this to print the memory map to serial debug,
// You also need to define ENABLE_SERIAL in serial.cpp
// for output to work.
/*
TODO:
-recycle bit!
*/
/*! The (physical) memory layout of the boot loader is currently as follows:
0x00000000 u-boot (run from NOR flash)
0xa0000000 u-boot stuff like kernel arguments afaik
0xa0100000 - 0xa0ffffff boot.tgz (up to 15MB probably never needed so big...)
0xa1000000 - 0xa1ffffff pagetables
0xa2000000 - ? code (up to 1MB)
0xa2100000 boot loader heap / free physical memory
The kernel is mapped at KERNEL_BASE, all other stuff mapped by the
loader (kernel args, modules, driver settings, ...) comes after
0x80020000 which means that there is currently only 2 MB reserved for
the kernel itself (see kMaxKernelSize).
*/
/*
 * Describes one physically contiguous region that the boot loader maps
 * 1:1 (identity mapped) in init_page_directory(). "flags" carries the
 * ARM L2 page-table bits (cacheable/bufferable/AP) applied to each page.
 */
struct memblock {
const char name[16];
// identifier; matched by name in mmu_init() (e.g. "RAM_pt", "RAM_free")
addr_t start;
// first physical address of the block (inclusive)
addr_t end;
// last physical address of the block (inclusive)
uint32 flags;
// ARM_MMU_L2_FLAG_* bits applied to every page mapping of this block
};
// Static map of the regions the loader identity-maps at MMU setup time.
// mmu_init() scans this table by name to locate the page-table region,
// the free-RAM cursor and the highest RAM address.
static struct memblock LOADER_MEMORYMAP[] = {
{
"devices",
DEVICE_BASE,
DEVICE_BASE + DEVICE_SIZE - 1,
ARM_MMU_L2_FLAG_B,
},
{
"RAM_loader", // 1MB loader
SDRAM_BASE + 0,
SDRAM_BASE + 0x0fffff,
ARM_MMU_L2_FLAG_C,
},
{
"RAM_pt", // page directory + page tables, 1MB
SDRAM_BASE + 0x100000,
SDRAM_BASE + 0x1FFFFF,
ARM_MMU_L2_FLAG_C,
},
{
"RAM_free", // 16MB free RAM (more exists but is not mapped automatically)
SDRAM_BASE + 0x0200000,
SDRAM_BASE + 0x11FFFFF,
ARM_MMU_L2_FLAG_C,
},
{
"RAM_stack", // stack
SDRAM_BASE + 0x1200000,
SDRAM_BASE + 0x2000000,
ARM_MMU_L2_FLAG_C,
},
{
"RAM_initrd", // initial ramdisk (boot archive), not a stack
SDRAM_BASE + 0x2000000,
SDRAM_BASE + 0x2500000,
ARM_MMU_L2_FLAG_C,
},
#ifdef FB_BASE
{
"framebuffer", // 2MB framebuffer ram
FB_BASE,
FB_BASE + FB_SIZE - 1,
ARM_MMU_L2_FLAG_AP_RW | ARM_MMU_L2_FLAG_C,
},
#endif
};
//static const uint32 kDefaultPageTableFlags = MMU_FLAG_READWRITE;
// not cached not buffered, R/W
static const size_t kMaxKernelSize = 0x200000; // 2 MB for the kernel
// Physical bump-allocator cursor; set in mmu_init() from the "RAM_free"
// entry of LOADER_MEMORYMAP.
static addr_t sNextPhysicalAddress = 0; //will be set by mmu_init
// Virtual bump-allocator cursor; kernel virtual allocations start right
// after the space reserved for the kernel image itself.
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
// Highest virtual address currently covered by a page table; map_page()
// extends this by adding page tables on demand.
static addr_t sMaxVirtualAddress = KERNEL_BASE + kMaxKernelSize;
// Next free address inside the "RAM_pt" region (set in mmu_init()).
static addr_t sNextPageTableAddress = 0;
//the page directory is in front of the pagetable
static uint32 kPageTableRegionEnd = 0;
// working page directory and page table
static uint32 *sPageDirectory = 0 ;
//page directory has to be on a multiple of 16MB for
//some arm processors
/*! Bump allocator for kernel virtual address space: hands out the
	current cursor and advances it by \a size bytes (no alignment).
*/
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t allocated = sNextVirtualAddress;
	sNextVirtualAddress += size;
	return allocated;
}
/*! Allocates \a size bytes of virtual address space aligned per \a mask
	(mask has the low alignment bits cleared, e.g. 0xffffffc0 for 64 bytes).
	Returns the aligned start address.
*/
static addr_t
get_next_virtual_address_alligned(size_t size, uint32 mask)
{
	// Round the cursor *up* to the requested alignment. The previous code
	// aligned down, which could return a range overlapping the most
	// recently allocated block when the cursor was unaligned.
	addr_t address = (sNextVirtualAddress + ~mask) & mask;
	sNextVirtualAddress = address + size;
	return address;
}
/*! Bump allocator for physical memory: hands out the current physical
	cursor and advances it by \a size bytes (no alignment).
*/
static addr_t
get_next_physical_address(size_t size)
{
	addr_t allocated = sNextPhysicalAddress;
	sNextPhysicalAddress += size;
	return allocated;
}
/*! Allocates \a size bytes of physical memory aligned per \a mask
	(mask has the low alignment bits cleared). Returns the aligned start.
*/
static addr_t
get_next_physical_address_alligned(size_t size, uint32 mask)
{
	// Round the cursor *up* to the requested alignment. The previous code
	// aligned down, which could hand out memory overlapping the most
	// recently allocated physical block.
	addr_t address = (sNextPhysicalAddress + ~mask) & mask;
	sNextPhysicalAddress = address + size;
	return address;
}
/*! Allocates one page of virtual address space, aligned to \a pagesize. */
static addr_t
get_next_virtual_page(size_t pagesize)
{
	// Derive the alignment mask from the page size. The old fixed mask
	// (0xffffffc0) only guaranteed 64-byte alignment, which is not page
	// alignment for B_PAGE_SIZE pages.
	return get_next_virtual_address_alligned(pagesize,
		~(uint32)(pagesize - 1));
}
/*! Allocates one page of physical memory, aligned to \a pagesize. */
static addr_t
get_next_physical_page(size_t pagesize)
{
	// Derive the alignment mask from the page size instead of the old
	// fixed 0xffffffc0 mask (which only gave 64-byte alignment).
	return get_next_physical_address_alligned(pagesize,
		~(uint32)(pagesize - 1));
}
/*
 * Set translation table base (CP15 c2, TTBR0).
 */
void
mmu_set_TTBR(uint32 ttb)
{
// The register requires 16KB alignment; mask off the low 14 bits.
ttb &= 0xffffc000;
asm volatile("MCR p15, 0, %[adr], c2, c0, 0"::[adr] "r" (ttb));
}
/*
 * Flush the TLB (CP15 c8 invalidate operation; the written value is
 * ignored by the hardware).
 */
void
mmu_flush_TLB()
{
uint32 value = 0;
asm volatile("MCR p15, 0, %[c8format], c8, c7, 0"::[c8format] "r" (value));
}
/*
 * Read MMU Control Register (CP15 c1, System Control Register:
 * MMU-enable bit, cache bits, subpage AP bit etc.).
 */
uint32
mmu_read_C1()
{
uint32 controlReg = 0;
asm volatile("MRC p15, 0, %[c1out], c1, c0, 0":[c1out] "=r" (controlReg));
return controlReg;
}
/*
 * Write MMU Control Register (CP15 c1). Writing bit 0 enables/disables
 * the MMU, see init_page_directory()/mmu_init().
 */
void
mmu_write_C1(uint32 value)
{
asm volatile("MCR p15, 0, %[c1in], c1, c0, 0"::[c1in] "r" (value));
}
// Write the Domain Access Control Register (CP15 c3); the loader sets all
// domains to "manager" (0xFFFFFFFF) in init_page_directory().
void
mmu_write_DACR(uint32 value)
{
asm volatile("MCR p15, 0, %[c1in], c3, c0, 0"::[c1in] "r" (value));
}
/*! Allocates and zeroes a second-level page table of the given L1 \a type.
	Tables come from the reserved "RAM_pt" region while it lasts; after
	that they are carved out of free physical memory.
	Returns NULL (after panicking) for unknown types.
*/
static uint32 *
get_next_page_table(uint32 type)
{
	TRACE(("get_next_page_table, sNextPageTableAddress 0x%" B_PRIxADDR
		", kPageTableRegionEnd 0x%" B_PRIxADDR ", type 0x%" B_PRIx32 "\n",
		sNextPageTableAddress, kPageTableRegionEnd, type));

	size_t size = 0;
	size_t entryCount = 0;
	switch (type) {
		case ARM_MMU_L1_TYPE_COARSE:
			size = ARM_MMU_L2_COARSE_TABLE_SIZE;
			entryCount = ARM_MMU_L2_COARSE_ENTRY_COUNT;
			break;
		case ARM_MMU_L1_TYPE_FINE:
			size = ARM_MMU_L2_FINE_TABLE_SIZE;
			entryCount = ARM_MMU_L2_FINE_ENTRY_COUNT;
			break;
		case ARM_MMU_L1_TYPE_SECTION:
			// TODO: Figure out parameters for section types.
			// NOTE(review): entryCount stays 0 here, so a section table is
			// returned uncleared — confirm whether this path is ever taken.
			size = 16384;
			break;
		default:
			panic("asked for unknown page table type: %#" B_PRIx32 "\n", type);
			return NULL;
	}

	addr_t address = sNextPageTableAddress;
	if (address + size <= kPageTableRegionEnd) {
		// The whole table still fits into the reserved region. The old
		// check only compared the start address, so the last table could
		// overrun the end of the region.
		sNextPageTableAddress += size;
	} else {
		TRACE(("page table allocation outside of pagetable region!\n"));
		address = get_next_physical_address_alligned(size, 0xffffffc0);
	}

	uint32 *pageTable = (uint32 *)address;
	for (size_t i = 0; i < entryCount; i++)
		pageTable[i] = 0;

	return pageTable;
}
/*! Returns the L2 page table covering \a address, allocating and
	installing a new one of the given \a type if the page directory slot
	is still empty. Panics (and returns NULL) if the slot already holds a
	table of a different type.
*/
static uint32 *
get_or_create_page_table(addr_t address, uint32 type)
{
	uint32 directoryIndex = VADDR_TO_PDENT(address);
	uint32 directoryEntry = sPageDirectory[directoryIndex];
	uint32 existingType = directoryEntry & ARM_PDE_TYPE_MASK;

	if (existingType == ARM_MMU_L1_TYPE_FAULT) {
		// Slot not populated yet: allocate a table and hook it up.
		uint32 *newTable = get_next_page_table(type);
		sPageDirectory[directoryIndex] = (uint32)newTable | type;
		return newTable;
	}

	if (existingType != type) {
		// This entry has been allocated with a different type!
		panic("tried to reuse page directory entry %" B_PRIu32
			" with different type (entry: %#" B_PRIx32 ", new type: %#" B_PRIx32
			")\n", directoryIndex, directoryEntry, type);
		return NULL;
	}

	// Already present with the right type — reuse it.
	return (uint32 *)(directoryEntry & ARM_PDE_ADDRESS_MASK);
}
/*! Builds the loader's page directory from LOADER_MEMORYMAP (identity
	mappings), maps the directory itself into kernel space, then enables
	the MMU. The statement order (fill tables, flush TLB, set TTBR,
	set DACR, enable MMU) is deliberate — do not reorder.
*/
void
init_page_directory()
{
TRACE(("init_page_directory\n"));
uint32 smallType;
// see if subpages are disabled (control register bit 23 selects the
// extended small-page descriptor format)
if (mmu_read_C1() & (1 << 23))
smallType = ARM_MMU_L2_TYPE_SMALLNEW;
else
smallType = ARM_MMU_L2_TYPE_SMALLEXT;
gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
// clear out the page directory
for (uint32 i = 0; i < ARM_MMU_L1_TABLE_ENTRY_COUNT; i++)
sPageDirectory[i] = 0;
// identity-map every block of LOADER_MEMORYMAP, page by page
for (uint32 i = 0; i < ARRAY_SIZE(LOADER_MEMORYMAP); i++) {
TRACE(("BLOCK: %s START: %lx END %lx\n", LOADER_MEMORYMAP[i].name,
LOADER_MEMORYMAP[i].start, LOADER_MEMORYMAP[i].end));
addr_t address = LOADER_MEMORYMAP[i].start;
ASSERT((address & ~ARM_PTE_ADDRESS_MASK) == 0);
uint32 *pageTable = NULL;
uint32 pageTableIndex = 0;
while (address < LOADER_MEMORYMAP[i].end) {
// fetch a (new) page table whenever we cross a 1MB boundary
if (pageTable == NULL
|| pageTableIndex >= ARM_MMU_L2_COARSE_ENTRY_COUNT) {
pageTable = get_or_create_page_table(address,
ARM_MMU_L1_TYPE_COARSE);
pageTableIndex = VADDR_TO_PTENT(address);
}
pageTable[pageTableIndex++]
= address | LOADER_MEMORYMAP[i].flags | smallType;
address += B_PAGE_SIZE;
}
}
// Map the page directory itself.
addr_t virtualPageDirectory = mmu_map_physical_memory(
(addr_t)sPageDirectory, ARM_MMU_L1_TABLE_SIZE, kDefaultPageFlags);
mmu_flush_TLB();
/* set up the translation table base */
mmu_set_TTBR((uint32)sPageDirectory);
mmu_flush_TLB();
/* set up the domain access register: all domains = manager access */
mmu_write_DACR(0xFFFFFFFF);
/* turn on the mmu (control register bit 0) */
mmu_write_C1(mmu_read_C1() | 0x1);
// Use the mapped page directory from now on.
sPageDirectory = (uint32 *)virtualPageDirectory;
gKernelArgs.arch_args.vir_pgdir = virtualPageDirectory;
}
/*! Adds a new coarse page table covering the 1MB region that contains
	\a base and installs it in the page directory.
*/
static void
add_page_table(addr_t base)
{
	TRACE(("add_page_table(base = %p)\n", (void *)base));

	// Allocate a coarse second-level table and zero all 256 entries.
	uint32 *pageTable = get_next_page_table(ARM_MMU_L1_TYPE_COARSE);
	memset(pageTable, 0, 256 * sizeof(uint32));

	// Hook the new table into the page directory.
	sPageDirectory[VADDR_TO_PDENT(base)]
		= (uint32)pageTable | ARM_MMU_L1_TYPE_COARSE;
}
/*! Creates an entry to map the specified virtualAddress to the given
	physicalAddress.
	If the mapping goes beyond the current page table, it will allocate
	a new one. If it cannot map the requested page, it panics.
	The physical address is truncated to a page boundary; \a flags are the
	ARM L2 descriptor bits ORed into the entry.
*/
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
physicalAddress));
// only kernel-space virtual addresses may be mapped here
if (virtualAddress < KERNEL_BASE) {
panic("map_page: asked to map invalid page %p!\n",
(void *)virtualAddress);
}
if (virtualAddress >= sMaxVirtualAddress) {
// we need to add a new page table (each table covers 256 pages = 1MB)
add_page_table(sMaxVirtualAddress);
sMaxVirtualAddress += B_PAGE_SIZE * 256;
// still beyond the mapped window? then only one table was missing,
// and the request is simply out of range
if (virtualAddress >= sMaxVirtualAddress) {
panic("map_page: asked to map a page to %p\n",
(void *)virtualAddress);
}
}
physicalAddress &= ~(B_PAGE_SIZE - 1);
// map the page to the correct page table
uint32 *pageTable
= (uint32 *)(sPageDirectory[VADDR_TO_PDENT(virtualAddress)]
& ARM_PDE_ADDRESS_MASK);
TRACE(("map_page: pageTable 0x%lx\n",
sPageDirectory[VADDR_TO_PDENT(virtualAddress)] & ARM_PDE_ADDRESS_MASK));
// directory slot may still be empty (e.g. below sMaxVirtualAddress but
// never touched) — create the table on demand
if (pageTable == NULL) {
add_page_table(virtualAddress);
pageTable = (uint32 *)(sPageDirectory[VADDR_TO_PDENT(virtualAddress)]
& ARM_PDE_ADDRESS_MASK);
}
uint32 tableEntry = VADDR_TO_PTENT(virtualAddress);
TRACE(("map_page: inserting pageTable %p, tableEntry 0x%" B_PRIx32
", physicalAddress 0x%" B_PRIxADDR "\n", pageTable, tableEntry,
physicalAddress));
pageTable[tableEntry] = physicalAddress | flags;
mmu_flush_TLB();
TRACE(("map_page: done\n"));
}
// #pragma mark -
/*! Maps the physical range [physicalAddress, physicalAddress + size) into
	freshly allocated kernel virtual pages and returns the virtual address
	corresponding to \a physicalAddress (including its in-page offset).
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;
		// the mapped range must also cover the in-page offset; without
		// this, the tail of an unaligned range stayed unmapped

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(B_PAGE_SIZE), physicalAddress + offset,
			flags);
	}

	return address + pageOffset;
}
/*! Clears the page-table entry for \a virtualAddress and flushes the TLB.
	Panics on addresses below KERNEL_BASE or on pages whose page table was
	never created.
*/
static void
unmap_page(addr_t virtualAddress)
{
	TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE) {
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);
	}

	// unmap the page from the correct page table
	uint32 *pageTable
		= (uint32 *)(sPageDirectory[VADDR_TO_PDENT(virtualAddress)]
			& ARM_PDE_ADDRESS_MASK);
	if (pageTable == NULL) {
		// Previously this dereferenced NULL; fail loudly instead.
		panic("unmap_page: no page table for %p!\n", (void *)virtualAddress);
		return;
	}
	pageTable[VADDR_TO_PTENT(virtualAddress)] = 0;

	mmu_flush_TLB();
}
/*! Allocates \a size bytes (rounded up to whole pages) of kernel virtual
	address space backed by fresh physical pages.
	If \a virtualAddress is non-NULL it must lie inside the kernel image
	window [KERNEL_BASE, KERNEL_BASE + kMaxKernelSize); no double-mapping
	checks are performed on that path. Returns the virtual address, or
	NULL if the requested fixed address is out of range.
*/
extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
		"%ld\n", virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		// NOTE: "size" is a page count at this point, so the upper bound
		// must be checked in bytes (the old check compared pages to bytes
		// and accepted far-too-large requests).
		if (address < KERNEL_BASE
			|| address + size * B_PAGE_SIZE > KERNEL_BASE + kMaxKernelSize) {
			TRACE(("mmu_allocate in illegal range\n address: %" B_PRIx32
				" KERNELBASE: %" B_PRIx32 " KERNEL_BASE + kMaxKernelSize: %"
				B_PRIx32 " address + size : %" B_PRIx32 "\n", (uint32)address,
				(uint32)KERNEL_BASE, (uint32)KERNEL_BASE + kMaxKernelSize,
				(uint32)(address + size)));
			return NULL;
		}

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(B_PAGE_SIZE),
				kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(B_PAGE_SIZE),
			get_next_physical_page(B_PAGE_SIZE), kDefaultPageFlags);
	}

	return address;
}
/*! This will unmap the allocated chunk of memory from the virtual
address space. It might not actually free memory (as its implementation
is very simple), but it might.
*/
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));
addr_t address = (addr_t)virtualAddress;
size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
// get number of pages to map
// is the address within the valid range?
if (address < KERNEL_BASE
|| address + size >= KERNEL_BASE + kMaxKernelSize) {
panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
(void *)address, size);
}
// unmap all pages within the range
for (uint32 i = 0; i < size; i++) {
unmap_page(address);
address += B_PAGE_SIZE;
}
if (address == sNextVirtualAddress) {
// we can actually reuse the virtual address space
sNextVirtualAddress -= size;
}
}
/*! Records the final allocation state in gKernelArgs so the kernel knows
	which physical and virtual ranges the loader consumed.
	NOTE(review): the previous comment here mentioned GDT/IDT tables and
	BIOS calls — x86 leftover text that does not apply to this ARM code.
*/
extern "C" void
mmu_init_for_kernel(void)
{
TRACE(("mmu_init_for_kernel\n"));
// save the memory we've physically allocated
gKernelArgs.physical_allocated_range[0].size
= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;
// Save the memory we've virtually allocated (for the kernel and other
// stuff)
gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
gKernelArgs.virtual_allocated_range[0].size
= sNextVirtualAddress - KERNEL_BASE;
gKernelArgs.num_virtual_allocated_ranges = 1;
#ifdef TRACE_MEMORY_MAP
{
uint32 i;
dprintf("phys memory ranges:\n");
for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
gKernelArgs.physical_memory_range[i].start,
gKernelArgs.physical_memory_range[i].size);
}
dprintf("allocated phys memory ranges:\n");
for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
gKernelArgs.physical_allocated_range[i].start,
gKernelArgs.physical_allocated_range[i].size);
}
dprintf("allocated virt memory ranges:\n");
for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
dprintf("    base 0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
gKernelArgs.virtual_allocated_range[i].start,
gKernelArgs.virtual_allocated_range[i].size);
}
}
#endif
}
/*! Loader MMU entry point: disables the MMU, derives the allocator
	cursors and the page-table region from LOADER_MEMORYMAP, publishes the
	physical memory ranges to gKernelArgs, builds/enables the page tables
	via init_page_directory() and allocates the initial kernel stack.
*/
extern "C" void
mmu_init(void)
{
TRACE(("mmu_init\n"));
mmu_write_C1(mmu_read_C1() & ~((1 << 29) | (1 << 28) | (1 << 0)));
// access flag disabled, TEX remap disabled, mmu disabled
uint32 highestRAMAddress = SDRAM_BASE;
// derive allocator state and the highest RAM address from the memory map
// (entries are matched by name)
for (uint32 i = 0; i < ARRAY_SIZE(LOADER_MEMORYMAP); i++) {
if (strcmp("RAM_free", LOADER_MEMORYMAP[i].name) == 0)
sNextPhysicalAddress = LOADER_MEMORYMAP[i].start;
if (strcmp("RAM_pt", LOADER_MEMORYMAP[i].name) == 0) {
// page directory sits at the start of "RAM_pt", page tables follow it
sNextPageTableAddress = LOADER_MEMORYMAP[i].start
+ ARM_MMU_L1_TABLE_SIZE;
kPageTableRegionEnd = LOADER_MEMORYMAP[i].end;
sPageDirectory = (uint32 *)LOADER_MEMORYMAP[i].start;
}
if (strncmp("RAM_", LOADER_MEMORYMAP[i].name, 4) == 0) {
if (LOADER_MEMORYMAP[i].end > highestRAMAddress)
highestRAMAddress = LOADER_MEMORYMAP[i].end;
}
}
gKernelArgs.physical_memory_range[0].start = SDRAM_BASE;
gKernelArgs.physical_memory_range[0].size = highestRAMAddress - SDRAM_BASE;
gKernelArgs.num_physical_memory_ranges = 1;
gKernelArgs.physical_allocated_range[0].start = SDRAM_BASE;
gKernelArgs.physical_allocated_range[0].size = 0;
gKernelArgs.num_physical_allocated_ranges = 1;
// remember the start of the allocated physical pages
init_page_directory();
// map in a kernel stack
gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
gKernelArgs.cpu_kstack[0].size);
TRACE(("kernel stack at 0x%" B_PRIx64 " to 0x%" B_PRIx64 "\n",
gKernelArgs.cpu_kstack[0].start,
gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
}
// #pragma mark -
/*! Platform hook: allocates \a size bytes of mapped memory, optionally at
	*_address. Protection and exactAddress are ignored by this loader.
*/
extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *allocated = mmu_allocate(*_address, size);
	if (allocated == NULL)
		return B_NO_MEMORY;

	*_address = allocated;
	return B_OK;
}
/*! Platform hook: releases a region previously obtained from
	platform_allocate_region(). Always reports success.
*/
extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}
// Platform hook: intentionally empty.
void
platform_release_heap(struct stage2_args *args, void *base)
{
// It will be freed automatically, since it is in the
// identity mapped region, and not stored in the kernel's
// page tables.
}
/*! Platform hook: carves the boot-loader heap (args->heap_size bytes)
	out of identity-mapped physical memory and reports its bounds.
*/
status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	addr_t heapBase = get_next_physical_address(args->heap_size);
	if ((void *)heapBase == NULL)
		return B_NO_MEMORY;

	*_base = (void *)heapBase;
	*_top = (void *)(heapBase + args->heap_size);
	return B_OK;
}