 //
 //===----------------------------------------------------------------------===//
 
+#include "Arch/ARM64Common.h"
 #include "InputFiles.h"
 #include "Symbols.h"
 #include "SyntheticSections.h"
@@ -25,22 +26,13 @@ using namespace lld::macho;
 
 namespace {
 
-struct ARM64 : TargetInfo {
+struct ARM64 : ARM64Common {
   ARM64();
-
-  int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
-                            const relocation_info) const override;
-  void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
-                   uint64_t pc) const override;
-
   void writeStub(uint8_t *buf, const Symbol &) const override;
   void writeStubHelperHeader(uint8_t *buf) const override;
   void writeStubHelperEntry(uint8_t *buf, const DylibSymbol &,
                             uint64_t entryAddr) const override;
-
-  void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
   const RelocAttrs &getRelocAttrs(uint8_t type) const override;
-  uint64_t getPageSize() const override { return 16 * 1024; }
 };
 
 } // namespace
@@ -77,156 +69,14 @@ const RelocAttrs &ARM64::getRelocAttrs(uint8_t type) const {
   return relocAttrsArray[type];
 }
 
-int64_t ARM64::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
-                                 const relocation_info rel) const {
-  if (rel.r_type != ARM64_RELOC_UNSIGNED &&
-      rel.r_type != ARM64_RELOC_SUBTRACTOR) {
-    // All other reloc types should use the ADDEND relocation to store their
-    // addends.
-    // TODO(gkm): extract embedded addend just so we can assert that it is 0
-    return 0;
-  }
-
-  auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
-  const uint8_t *loc = buf + offset + rel.r_address;
-  switch (rel.r_length) {
-  case 2:
-    return static_cast<int32_t>(read32le(loc));
-  case 3:
-    return read64le(loc);
-  default:
-    llvm_unreachable("invalid r_length");
-  }
-}
-
-inline uint64_t bitField(uint64_t value, int right, int width, int left) {
-  return ((value >> right) & ((1 << width) - 1)) << left;
-}
-
-//            25                                                  0
-// +-----------+---------------------------------------------------+
-// |           |                       imm26                       |
-// +-----------+---------------------------------------------------+
-
-inline uint64_t encodeBranch26(const Reloc &r, uint64_t base, uint64_t va) {
-  checkInt(r, va, 28);
-  // Since branch destinations are 4-byte aligned, the 2 least-
-  // significant bits are 0. They are right shifted off the end.
-  return (base | bitField(va, 2, 26, 0));
-}
-
-inline uint64_t encodeBranch26(SymbolDiagnostic d, uint64_t base, uint64_t va) {
-  checkInt(d, va, 28);
-  return (base | bitField(va, 2, 26, 0));
-}
-
-//   30 29          23                                  5
-// +-+---+---------+-------------------------------------+---------+
-// | |ilo|         |                immhi                |         |
-// +-+---+---------+-------------------------------------+---------+
-
-inline uint64_t encodePage21(const Reloc &r, uint64_t base, uint64_t va) {
-  checkInt(r, va, 35);
-  return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
-}
-
-inline uint64_t encodePage21(SymbolDiagnostic d, uint64_t base, uint64_t va) {
-  checkInt(d, va, 35);
-  return (base | bitField(va, 12, 2, 29) | bitField(va, 14, 19, 5));
-}
-
-//                   21                      10
-// +-------------------+-----------------------+-------------------+
-// |                   |         imm12         |                   |
-// +-------------------+-----------------------+-------------------+
-
-inline uint64_t encodePageOff12(uint32_t base, uint64_t va) {
-  int scale = 0;
-  if ((base & 0x3b00'0000) == 0x3900'0000) { // load/store
-    scale = base >> 30;
-    if (scale == 0 && (base & 0x0480'0000) == 0x0480'0000) // 128-bit variant
-      scale = 4;
-  }
-
-  // TODO(gkm): extract embedded addend and warn if != 0
-  // uint64_t addend = ((base & 0x003FFC00) >> 10);
-  return (base | bitField(va, scale, 12 - scale, 10));
-}
-
-inline uint64_t pageBits(uint64_t address) {
-  const uint64_t pageMask = ~0xfffull;
-  return address & pageMask;
-}
-
-// For instruction relocations (load, store, add), the base
-// instruction is pre-populated in the text section. A pre-populated
-// instruction has opcode & register-operand bits set, with immediate
-// operands zeroed. We read it from text, OR-in the immediate
-// operands, then write-back the completed instruction.
-
-void ARM64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
-                        uint64_t pc) const {
-  uint32_t base = ((r.length == 2) ? read32le(loc) : 0);
-  value += r.addend;
-  switch (r.type) {
-  case ARM64_RELOC_BRANCH26:
-    value = encodeBranch26(r, base, value - pc);
-    break;
-  case ARM64_RELOC_SUBTRACTOR:
-  case ARM64_RELOC_UNSIGNED:
-    if (r.length == 2)
-      checkInt(r, value, 32);
-    break;
-  case ARM64_RELOC_POINTER_TO_GOT:
-    if (r.pcrel)
-      value -= pc;
-    checkInt(r, value, 32);
-    break;
-  case ARM64_RELOC_PAGE21:
-  case ARM64_RELOC_GOT_LOAD_PAGE21:
-  case ARM64_RELOC_TLVP_LOAD_PAGE21: {
-    assert(r.pcrel);
-    value = encodePage21(r, base, pageBits(value) - pageBits(pc));
-    break;
-  }
-  case ARM64_RELOC_PAGEOFF12:
-  case ARM64_RELOC_GOT_LOAD_PAGEOFF12:
-  case ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
-    assert(!r.pcrel);
-    value = encodePageOff12(base, value);
-    break;
-  default:
-    llvm_unreachable("unexpected relocation type");
-  }
-
-  switch (r.length) {
-  case 2:
-    write32le(loc, value);
-    break;
-  case 3:
-    write64le(loc, value);
-    break;
-  default:
-    llvm_unreachable("invalid r_length");
-  }
-}
-
 static constexpr uint32_t stubCode[] = {
     0x90000010, // 00: adrp  x16, __la_symbol_ptr@page
     0xf9400210, // 04: ldr   x16, [x16, __la_symbol_ptr@pageoff]
     0xd61f0200, // 08: br    x16
 };
 
 void ARM64::writeStub(uint8_t *buf8, const Symbol &sym) const {
-  auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
-  uint64_t pcPageBits =
-      pageBits(in.stubs->addr + sym.stubsIndex * sizeof(stubCode));
-  uint64_t lazyPointerVA =
-      in.lazyPointers->addr + sym.stubsIndex * LP64::wordSize;
-  buf32[0] = encodePage21({&sym, "stub"}, stubCode[0],
-                          pageBits(lazyPointerVA) - pcPageBits);
-  buf32[1] = encodePageOff12(stubCode[1], lazyPointerVA);
-  buf32[2] = stubCode[2];
+  ::writeStub<LP64, stubCode>(buf8, sym);
 }
 
 static constexpr uint32_t stubHelperHeaderCode[] = {
@@ -239,22 +89,7 @@ static constexpr uint32_t stubHelperHeaderCode[] = {
 };
 
 void ARM64::writeStubHelperHeader(uint8_t *buf8) const {
-  auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
-  auto pcPageBits = [](int i) {
-    return pageBits(in.stubHelper->addr + i * sizeof(uint32_t));
-  };
-  uint64_t loaderVA = in.imageLoaderCache->getVA();
-  SymbolDiagnostic d = {nullptr, "stub header helper"};
-  buf32[0] = encodePage21(d, stubHelperHeaderCode[0],
-                          pageBits(loaderVA) - pcPageBits(0));
-  buf32[1] = encodePageOff12(stubHelperHeaderCode[1], loaderVA);
-  buf32[2] = stubHelperHeaderCode[2];
-  uint64_t binderVA =
-      in.got->addr + in.stubHelper->stubBinder->gotIndex * LP64::wordSize;
-  buf32[3] = encodePage21(d, stubHelperHeaderCode[3],
-                          pageBits(binderVA) - pcPageBits(3));
-  buf32[4] = encodePageOff12(stubHelperHeaderCode[4], binderVA);
-  buf32[5] = stubHelperHeaderCode[5];
+  ::writeStubHelperHeader<LP64, stubHelperHeaderCode>(buf8);
 }
 
 static constexpr uint32_t stubHelperEntryCode[] = {
@@ -265,34 +100,10 @@ static constexpr uint32_t stubHelperEntryCode[] = {
 
 void ARM64::writeStubHelperEntry(uint8_t *buf8, const DylibSymbol &sym,
                                  uint64_t entryVA) const {
-  auto *buf32 = reinterpret_cast<uint32_t *>(buf8);
-  auto pcVA = [entryVA](int i) { return entryVA + i * sizeof(uint32_t); };
-  uint64_t stubHelperHeaderVA = in.stubHelper->addr;
-  buf32[0] = stubHelperEntryCode[0];
-  buf32[1] = encodeBranch26({&sym, "stub helper"}, stubHelperEntryCode[1],
-                            stubHelperHeaderVA - pcVA(1));
-  buf32[2] = sym.lazyBindOffset;
-}
-
-void ARM64::relaxGotLoad(uint8_t *loc, uint8_t type) const {
-  // The instruction format comments below are quoted from
-  //   Arm® Architecture Reference Manual
-  //   Armv8, for Armv8-A architecture profile
-  //   ARM DDI 0487G.a (ID011921)
-  uint32_t instruction = read32le(loc);
-  // C6.2.132 LDR (immediate)
-  //   LDR <Xt>, [<Xn|SP>{, #<pimm>}]
-  if ((instruction & 0xffc00000) != 0xf9400000)
-    error(getRelocAttrs(type).name + " reloc requires LDR instruction");
-  assert(((instruction >> 10) & 0xfff) == 0 &&
-         "non-zero embedded LDR immediate");
-  // C6.2.4 ADD (immediate)
-  //   ADD <Xd|SP>, <Xn|SP>, #<imm>{, <shift>}
-  instruction = ((instruction & 0x001fffff) | 0x91000000);
-  write32le(loc, instruction);
+  ::writeStubHelperEntry<stubHelperEntryCode>(buf8, sym, entryVA);
 }
 
-ARM64::ARM64() : TargetInfo(LP64()) {
+ARM64::ARM64() : ARM64Common(LP64()) {
   cpuType = CPU_TYPE_ARM64;
   cpuSubtype = CPU_SUBTYPE_ARM64_ALL;