#include "pharovm/pharo.h"
#include <sys/mman.h>
#include <fcntl.h>
#include <signal.h>
#define roundDownToPage(v) ((v)&pageMask)
#define roundUpToPage(v) (((v)+pageSize-1)&pageMask)
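
/* A minimal illustration of the rounding macros, assuming 4 KB pages
 * (pageSize = 0x1000, pageMask = ~0xFFF); the addresses are made up:
 *
 *   roundDownToPage(0x12345) == 0x12000
 *   roundUpToPage(0x12345)   == 0x13000
 *   roundUpToPage(0x12000)   == 0x12000   (already aligned)
 */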

sqInt uxMemoryExtraBytesLeft(sqInt includingSwap);

#if !defined(MAP_ANON)
# if defined(MAP_ANONYMOUS)
#   define MAP_ANON MAP_ANONYMOUS
# else
#   define MAP_ANON 0
# endif
#endif

#define MAP_PROT (PROT_READ | PROT_WRITE)

#if __OpenBSD__
# define MAP_FLAGS (MAP_ANON | MAP_PRIVATE | MAP_STACK)
#else
# define MAP_FLAGS (MAP_ANON | MAP_PRIVATE)
#endif

/* valign() truncates to a page boundary (same as roundDownToPage). */
#define valign(x) ((x) & pageMask)

/* xxx THESE SHOULD BE COMMAND-LINE/ENVIRONMENT OPTIONS */

/* Note:
 *
 * The code allows memory to be overallocated; i.e., the initial
 * block is reserved via mmap() and then the unused portion is
 * munmap()ped from the top end.  This is INHERENTLY DANGEROUS, since
 * malloc() may randomly map new memory into the block we "reserved"
 * and subsequently munmap()ped.  Enabling this causes crashes in
 * Croquet, which makes heavy use of the FFI and thus calls malloc()
 * all over the place.
 *
 * For this reason, overallocateMemory is DISABLED by default.
 *
 * The upshot of all this is that Squeak will claim (and hold on to)
 * ALL of the available virtual memory (or at least 75% of it) when
 * it starts up.  If you can't live with that, use the -memory
 * option to allocate a fixed-size heap.
 */
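
/* A minimal sketch of what the (disabled) overallocation strategy amounts
 * to; the names reserved/needed are hypothetical and only illustrate the
 * mmap-then-trim idea described above:
 *
 *   char *reserved = mmap(0, hugeSize, MAP_PROT, MAP_FLAGS, -1, 0);
 *   sqInt used     = roundUpToPage(needed);
 *   munmap(reserved + used, hugeSize - used);  // give back the top end
 *
 * After the munmap(), nothing stops the C library from mapping its own
 * allocations into the released range, which is exactly the hazard the
 * note above warns about.
 */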

int overallocateMemory = 0; /* see the note above: disabled by default */

static sqInt devZero = -1;
static char *heap = 0;
static sqInt heapSize = 0;
static sqInt heapLimit = 0;

#ifndef max
# define max(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef min
# define min(a, b) (((a) < (b)) ? (a) : (b))
#endif

static sqInt pageSize = 0;
static usqInt pageMask = 0;

int mmapErrno = 0;

void
sqMakeMemoryExecutableFromTo(unsigned long startAddr, unsigned long endAddr)
{
	/* Intentionally a no-op: allocateJITMemory() below already maps the code
	 * zone with PROT_EXEC, so no mprotect() call is needed here.  The old
	 * implementation is kept for reference:
	 *
	 * sqInt firstPage = roundDownToPage(startAddr);
	 * if (mprotect((void *)firstPage,
	 *              endAddr - firstPage + 1,
	 *              PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
	 *     logError("mprotect(x,y,PROT_READ | PROT_WRITE | PROT_EXEC)");
	 *     logError("ERRNO: %d\n", errno);
	 *     exit(1);
	 * }
	 */
}

void
sqMakeMemoryNotExecutableFromTo(unsigned long startAddr, unsigned long endAddr)
{
	/* Arguably this is pointless, since allocated memory always includes
	 * write permission.  Annoyingly, the mprotect() call fails on both
	 * Linux and Mac OS X, so make the whole thing a no-op:
	 *
	 * sqInt firstPage = roundDownToPage(startAddr);
	 * if (mprotect((void *)firstPage,
	 *              endAddr - firstPage + 1,
	 *              PROT_READ | PROT_WRITE) < 0)
	 *     logErrorFromErrno("mprotect(x,y,PROT_READ | PROT_WRITE)");
	 */
}

void *
allocateJITMemory(usqInt desiredSize, usqInt desiredPosition)
{
	pageMask = ~(getpagesize() - 1);
	usqInt alignedSize = valign(max(desiredSize, 1));
	usqInt desiredBaseAddressAligned = valign(desiredPosition);
	void *result;

#if __APPLE__
	/* On macOS, executable mappings intended for a JIT must ask for MAP_JIT. */
	int additionalFlags = MAP_JIT;
#else
	int additionalFlags = 0;
#endif

	logDebug("Trying to allocate JIT memory at %p\n", (void *)desiredBaseAddressAligned);
	if (MAP_FAILED == (result = mmap((void *)desiredBaseAddressAligned, alignedSize,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FLAGS | additionalFlags, -1, 0))) {
		logErrorFromErrno("Could not allocate JIT memory");
		exit(1);
	}
	return result;
}
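
/* Illustrative (hypothetical) call site: reserving a 16 MB code zone near a
 * preferred base address.  Both numbers are made up for the example:
 *
 *   void *codeZone = allocateJITMemory(16 * 1024 * 1024, 0x1000000);
 *
 * The region comes back mapped PROT_READ | PROT_WRITE | PROT_EXEC, so the
 * JIT can write and run machine code in it without further mprotect() calls.
 */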

/* Answer the address of (minHeapSize <= N <= desiredHeapSize) bytes of memory. */
usqInt
sqAllocateMemory(usqInt minHeapSize, usqInt desiredHeapSize, usqInt desiredBaseAddress)
{
	if (heap) {
		logError("sqAllocateMemory: already called\n");
		exit(1);
	}
	pageSize = getpagesize();
	pageMask = ~(pageSize - 1);

	heapLimit = valign(max(desiredHeapSize, 1));
	usqInt desiredBaseAddressAligned = valign(desiredBaseAddress);

	logDebug("Trying to load the image at %p\n", (void *)desiredBaseAddressAligned);

	while (!heap && heapLimit >= minHeapSize) {
		if (MAP_FAILED == (heap = mmap((void *)desiredBaseAddressAligned, heapLimit,
				MAP_PROT, MAP_FLAGS, devZero, 0))) {
			heap = 0;
			heapLimit = valign(heapLimit / 4 * 3); /* retry with 3/4 of the size */
		}
		/* On Linux, mmap() may honour the address hint loosely and place the
		 * mapping too high in the address space.  If the hint was not
		 * honoured, release the mapping and retry one page further up. */
#ifndef __APPLE__
		if (heap && (usqInt)heap != desiredBaseAddressAligned) {
			desiredBaseAddressAligned = valign(desiredBaseAddressAligned + pageSize);
			if ((usqInt)heap < desiredBaseAddress) {
				logError("Cannot find a good memory address starting from: %p", (void *)desiredBaseAddress);
				exit(-1);
			}
			/* The search address wrapped around the end of the address space. */
			if (desiredBaseAddress > desiredBaseAddressAligned) {
				logError("Cannot find a good memory address starting from: %p", (void *)desiredBaseAddress);
				exit(-1);
			}
			munmap(heap, heapLimit);
			heap = 0;
		}
#endif
	}
	if (!heap) {
		logError("Failed to allocate at least %lld bytes\n", (long long)minHeapSize);
		exit(-1);
	}
	heapSize = heapLimit;
	logDebug("Loading the image at %p\n", (void *)heap);
	return (usqInt)heap;
}
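
/* For example, if a 512 MB desiredHeapSize cannot be mapped, the loop above
 * retries with 3/4 of the previous size: 512 MB, 384 MB, 288 MB, 216 MB, ...
 * and gives up once the candidate drops below minHeapSize.  (The sizes here
 * are illustrative only.)
 */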

/* Answer the number of bytes available for growing the heap. */
sqInt
uxMemoryExtraBytesLeft(sqInt includingSwap)
{
	return heapLimit - heapSize;
}

sqInt
sqMemoryExtraBytesLeft(sqInt includingSwap)
{
	return uxMemoryExtraBytesLeft(includingSwap);
}

/* Deallocate a region of memory previously allocated by
 * sqAllocateMemorySegmentOfSizeAboveAllocatedSizeInto.  Cannot fail: the
 * interface has no way to report an error, so a failed munmap() is only logged.
 */
void
sqDeallocateMemorySegmentAtOfSize(void *addr, sqInt sz)
{
	if (munmap(addr, sz) != 0)
		logErrorFromErrno("sqDeallocateMemorySegment... munmap");
}

void *
sqAllocateMemorySegmentOfSizeAboveAllocatedSizeInto(sqInt size, void *minAddress, sqInt *allocatedSizePointer)
{
	void *alloc;
	void *startAddress;
	long bytes;
	int count = 0;

	/* Initialise the page constants before using roundUpToPage(); the mask
	 * must be the inverted form, ~(pageSize - 1), that the macros expect. */
	if (!pageSize) {
		pageSize = getpagesize();
		pageMask = ~(pageSize - 1);
	}
	bytes = roundUpToPage(size);
	*allocatedSizePointer = bytes;

	/* March upwards from minAddress until mmap() hands back a region at or
	 * above it, or the candidate address wraps around the address space. */
	while ((char *)minAddress + bytes > (char *)minAddress) {
		startAddress = (void *)roundUpToPage((sqInt)minAddress);
		alloc = mmap(startAddress, bytes,
				PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0);
		if (alloc == MAP_FAILED) {
			logWarnFromErrno("sqAllocateMemorySegmentOfSizeAboveAllocatedSizeInto mmap");
			return 0;
		}
		if (count >= 6)
			logTrace("More than 6 retries... maybe something is wrong\n");
		logTrace("Asked: %10p %10p %10p\n", alloc, minAddress, startAddress);
		if (alloc >= minAddress) {
			logTrace("Allocated Piece: %10p\n", alloc);
			return alloc;
		}
		count++;
		if (munmap(alloc, bytes) != 0)
			logWarnFromErrno("sqAllocateMemorySegment... munmap");
		minAddress = (void *)((char *)minAddress + bytes);
	}
	return 0;
}
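
/* Illustrative (hypothetical) use, growing the heap by one 1 MB segment
 * somewhere at or above the current heap end:
 *
 *   sqInt allocated = 0;
 *   void *seg = sqAllocateMemorySegmentOfSizeAboveAllocatedSizeInto(
 *                   1024 * 1024, heap + heapSize, &allocated);
 *   if (seg) {
 *       // ... use the segment ...
 *       sqDeallocateMemorySegmentAtOfSize(seg, allocated);
 *   }
 *
 * allocated comes back page-rounded, which is why the deallocator must be
 * given the size reported here rather than the size originally requested.
 */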