@@ -210,8 +210,8 @@ void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,

   if (info.size > info.storageSize)
     info.size = info.storageSize;
-  // Reverse the bit offsets for big endian machines. Because we represent
-  // a bitfield as a single large integer load, we can imagine the bits
+  // Reverse the bit offsets for big endian machines. Since bitfields are laid
+  // out as packed bits within an integer-sized unit, we can imagine the bits
   // counting from the most-significant-bit instead of the
   // least-significant-bit.
   assert(!cir::MissingFeatures::isBigEndian());
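Aside (not part of the patch): when big-endian support lands, the reversal described in the comment above is typically just a mirror of the field's offset within its storage unit. A minimal sketch, assuming offsets and sizes are in bits as in setBitFieldInfo; the helper name is hypothetical:

    // Hypothetical helper: count a bitfield's offset from the most-significant
    // bit of its storage unit instead of the least-significant bit.
    #include <cstdint>
    uint64_t reverseBitFieldOffset(uint64_t offset, uint64_t size,
                                   uint64_t storageSize) {
      return storageSize - (offset + size);
    }
    // Example: a 3-bit field at offset 2 in a 32-bit unit lands at 32 - (2 + 3) = 27.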
@@ -281,35 +281,25 @@ void CIRRecordLowering::fillOutputFields() {

 void CIRRecordLowering::accumulateBitFields(
     RecordDecl::field_iterator field, RecordDecl::field_iterator fieldEnd) {
-  // Run stores the first element of the current run of bitfields. FieldEnd is
-  // used as a special value to note that we don't have a current run. A
+  // 'run' stores the first element of the current run of bitfields. 'fieldEnd'
+  // is used as a special value to note that we don't have a current run. A
   // bitfield run is a contiguous collection of bitfields that can be stored in
   // the same storage block. Zero-sized bitfields and bitfields that would
   // cross an alignment boundary break a run and start a new one.
   RecordDecl::field_iterator run = fieldEnd;
-  // Tail is the offset of the first bit off the end of the current run. It's
+  // 'tail' is the offset of the first bit off the end of the current run. It's
   // used to determine if the ASTRecordLayout is treating these two bitfields as
-  // contiguous. StartBitOffset is offset of the beginning of the Run.
+  // contiguous. 'startBitOffset' is the offset of the beginning of the run.
   uint64_t startBitOffset, tail = 0;
   assert(!cir::MissingFeatures::isDiscreteBitFieldABI());

-  // Check if OffsetInRecord (the size in bits of the current run) is better
+  // Check if 'offsetInRecord' (the size in bits of the current run) is better
   // as a single field run. When OffsetInRecord has legal integer width, and
   // its bitfield offset is naturally aligned, it is better to make the
   // bitfield a separate storage component so as it can be accessed directly
   // with lower cost.
-  auto isBetterAsSingleFieldRun = [&](uint64_t offsetInRecord,
-                                      uint64_t startBitOffset,
-                                      uint64_t nextTail = 0) {
-    if (!cirGenTypes.getCGModule().getCodeGenOpts().FineGrainedBitfieldAccesses)
-      return false;
-    cirGenTypes.getCGModule().errorNYI(field->getSourceRange(),
-                                       "NYI FineGrainedBitfield");
-    return true;
-  };
+  assert(!cir::MissingFeatures::nonFineGrainedBitfields());

-  // The start field is better as a single field run.
-  bool startFieldAsSingleRun = false;
   for (;;) {
     // Check to see if we need to start a new run.
     if (run == fieldEnd) {
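To make the run bookkeeping above concrete, here is a hypothetical input (illustration only, not part of the patch), assuming a typical target where zero-length bitfield alignment is honored:

    struct Example {
      unsigned a : 3; // starts a run: startBitOffset = 0, tail = 3
      unsigned b : 5; // contiguous with 'a', joins the run: tail = 8
      unsigned   : 0; // zero-width bitfield: ends the run, storage is emitted
      unsigned c : 7; // begins a new run in the next storage unit
    };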
@@ -321,27 +311,34 @@ void CIRRecordLowering::accumulateBitFields(
         run = field;
         startBitOffset = getFieldBitOffset(*field);
         tail = startBitOffset + field->getBitWidthValue();
-        startFieldAsSingleRun =
-            isBetterAsSingleFieldRun(tail - startBitOffset, startBitOffset);
+        assert(!cir::MissingFeatures::nonFineGrainedBitfields());
       }
       ++field;
       continue;
     }

-    // If the start field of a new run is better as a single run, or if current
-    // field (or consecutive fields) is better as a single run, or if current
-    // field has zero width bitfield and either UseZeroLengthBitfieldAlignment
-    // or UseBitFieldTypeAlignment is set to true, or if the offset of current
-    // field is inconsistent with the offset of previous field plus its offset,
-    // skip the block below and go ahead to emit the storage. Otherwise, try to
-    // add bitfields to the run.
+    // Decide whether to continue extending the current bitfield run.
+    //
+    // Skip the block below and go directly to emitting storage if any of the
+    // following is true:
+    //   1. The first field in the run is better treated as its own run.
+    //   2. We have reached the end of the fields.
+    //   3. The current field (or set of fields) is better as its own run.
+    //   4. The current field is a zero-width bitfield and either zero-length
+    //      bitfield alignment or bitfield type alignment is enabled for the
+    //      target.
+    //   5. The current field's offset doesn't match the expected tail (i.e.,
+    //      the layout isn't contiguous).
+    //
+    // If none of the above conditions are met, add the current field to the
+    // current run.
     uint64_t nextTail = tail;
     if (field != fieldEnd)
       nextTail += field->getBitWidthValue();

-    if (!startFieldAsSingleRun && field != fieldEnd &&
-        !isBetterAsSingleFieldRun(tail - startBitOffset, startBitOffset,
-                                  nextTail) &&
+    // TODO: Add conditions 1 and 3.
+    assert(!cir::MissingFeatures::nonFineGrainedBitfields());
+    if (field != fieldEnd &&
         (!field->isZeroLengthBitField() ||
          (!astContext.getTargetInfo().useZeroLengthBitfieldAlignment() &&
           !astContext.getTargetInfo().useBitFieldTypeAlignment())) &&
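As a worked trace of this check, using the hypothetical Example struct from the earlier sketch (values illustrative):

    // run = a, startBitOffset = 0, tail = 0 + 3 = 3
    // getFieldBitOffset(b) == 3 == tail  -> 'b' is contiguous and joins the run
    // nextTail = 3 + 5 = 8
    // The zero-width bitfield then ends the run via condition 4, so the run
    // [a, b] is flushed to a single storage field.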
@@ -362,7 +359,6 @@ void CIRRecordLowering::accumulateBitFields(
       members.push_back(MemberInfo(bitsToCharUnits(startBitOffset),
                                    MemberInfo::InfoKind::Field, nullptr, *run));
       run = fieldEnd;
-      startFieldAsSingleRun = false;
     }
   }