Skip to content

Commit a2c4bad

Browse files
committed
kgo: universally switch to 1.19's atomics if on Go 1.19+
The current lint on arm should be ensuring alignment is proper, but apparently that is not always the case, as seen in #286. Go has compiler intrinsics to ensure proper alignment for the actual atomic number types introduced in 1.19. This doesn't fix 1.18, but it should fix 1.19+. Closes #286.
1 parent 66e626f commit a2c4bad

File tree

14 files changed

+139
-113
lines changed

14 files changed

+139
-113
lines changed

pkg/kgo/atomic_maybe_work.go

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,26 +1,24 @@
11
package kgo
22

3-
import "sync/atomic"
4-
53
const (
64
stateUnstarted = iota
75
stateWorking
86
stateContinueWorking
97
)
108

11-
type workLoop struct{ state uint32 }
9+
type workLoop struct{ state atomicU32 }
1210

1311
// maybeBegin returns whether a work loop should begin.
1412
func (l *workLoop) maybeBegin() bool {
1513
var state uint32
1614
var done bool
1715
for !done {
18-
switch state = atomic.LoadUint32(&l.state); state {
16+
switch state = l.state.Load(); state {
1917
case stateUnstarted:
20-
done = atomic.CompareAndSwapUint32(&l.state, state, stateWorking)
18+
done = l.state.CompareAndSwap(state, stateWorking)
2119
state = stateWorking
2220
case stateWorking:
23-
done = atomic.CompareAndSwapUint32(&l.state, state, stateContinueWorking)
21+
done = l.state.CompareAndSwap(state, stateContinueWorking)
2422
state = stateContinueWorking
2523
case stateContinueWorking:
2624
done = true
@@ -43,24 +41,24 @@ func (l *workLoop) maybeBegin() bool {
4341
// since the loop itself calls MaybeFinish after it has been started, this
4442
// should never be called if the loop is unstarted.
4543
func (l *workLoop) maybeFinish(again bool) bool {
46-
switch state := atomic.LoadUint32(&l.state); state {
44+
switch state := l.state.Load(); state {
4745
// Working:
4846
// If again, we know we should continue; keep our state.
4947
// If not again, we try to downgrade state and stop.
5048
// If we cannot, then something slipped in to say keep going.
5149
case stateWorking:
5250
if !again {
53-
again = !atomic.CompareAndSwapUint32(&l.state, state, stateUnstarted)
51+
again = !l.state.CompareAndSwap(state, stateUnstarted)
5452
}
5553
// Continue: demote ourself and run again no matter what.
5654
case stateContinueWorking:
57-
atomic.StoreUint32(&l.state, stateWorking)
55+
l.state.Store(stateWorking)
5856
again = true
5957
}
6058

6159
return again
6260
}
6361

6462
func (l *workLoop) hardFinish() {
65-
atomic.StoreUint32(&l.state, stateUnstarted)
63+
l.state.Store(stateUnstarted)
6664
}

pkg/kgo/broker.go

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ type broker struct {
155155
// reqs manages incoming message requests.
156156
reqs ringReq
157157
// dead is an atomic so a backed up reqs cannot block broker stoppage.
158-
dead int32
158+
dead atomicBool
159159
}
160160

161161
// brokerVersions is loaded once (and potentially a few times concurrently if
@@ -214,7 +214,7 @@ func (cl *Client) newBroker(nodeID int32, host string, port int32, rack *string)
214214

215215
// stopForever permanently disables this broker.
216216
func (b *broker) stopForever() {
217-
if atomic.SwapInt32(&b.dead, 1) == 1 {
217+
if b.dead.Swap(true) {
218218
return
219219
}
220220

@@ -502,7 +502,7 @@ func (b *broker) loadConnection(ctx context.Context, req kmsg.Request) (*brokerC
502502
pcxn = &b.cxnSlow
503503
}
504504

505-
if *pcxn != nil && atomic.LoadInt32(&(*pcxn).dead) == 0 {
505+
if *pcxn != nil && !(*pcxn).dead.Load() {
506506
return *pcxn, nil
507507
}
508508

@@ -581,7 +581,7 @@ func (b *broker) reapConnections(idleTimeout time.Duration) (total int) {
581581
b.cxnGroup,
582582
b.cxnSlow,
583583
} {
584-
if cxn == nil || atomic.LoadInt32(&cxn.dead) == 1 {
584+
if cxn == nil || cxn.dead.Load() {
585585
continue
586586
}
587587

@@ -592,11 +592,11 @@ func (b *broker) reapConnections(idleTimeout time.Duration) (total int) {
592592
// - produce can write but never read
593593
// - fetch can hang for a while reading (infrequent writes)
594594

595-
lastWrite := time.Unix(0, atomic.LoadInt64(&cxn.lastWrite))
596-
lastRead := time.Unix(0, atomic.LoadInt64(&cxn.lastRead))
595+
lastWrite := time.Unix(0, cxn.lastWrite.Load())
596+
lastRead := time.Unix(0, cxn.lastRead.Load())
597597

598-
writeIdle := time.Since(lastWrite) > idleTimeout && atomic.LoadUint32(&cxn.writing) == 0
599-
readIdle := time.Since(lastRead) > idleTimeout && atomic.LoadUint32(&cxn.reading) == 0
598+
writeIdle := time.Since(lastWrite) > idleTimeout && !cxn.writing.Load()
599+
readIdle := time.Since(lastRead) > idleTimeout && !cxn.reading.Load()
600600

601601
if writeIdle && readIdle {
602602
cxn.die()
@@ -634,7 +634,7 @@ func (b *broker) connect(ctx context.Context) (net.Conn, error) {
634634
// brokerCxn manages an actual connection to a Kafka broker. This is separate
635635
// the broker struct to allow lazy connection (re)creation.
636636
type brokerCxn struct {
637-
throttleUntil int64 // atomic nanosec
637+
throttleUntil atomicI64 // atomic nanosec
638638

639639
conn net.Conn
640640

@@ -651,17 +651,17 @@ type brokerCxn struct {
651651
// The following four fields are used for connection reaping.
652652
// Write is only updated in one location; read is updated in three
653653
// due to readConn, readConnAsync, and discard.
654-
lastWrite int64
655-
lastRead int64
656-
writing uint32
657-
reading uint32
654+
lastWrite atomicI64
655+
lastRead atomicI64
656+
writing atomicBool
657+
reading atomicBool
658658

659659
successes uint64
660660

661661
// resps manages reading kafka responses.
662662
resps ringResp
663663
// dead is an atomic so that a backed up resps cannot block cxn death.
664-
dead int32
664+
dead atomicBool
665665
// closed in cloneConn; allows throttle waiting to quit
666666
deadCh chan struct{}
667667
}
@@ -982,7 +982,7 @@ func maybeUpdateCtxErr(clientCtx, reqCtx context.Context, err *error) {
982982
func (cxn *brokerCxn) writeRequest(ctx context.Context, enqueuedForWritingAt time.Time, req kmsg.Request) (corrID int32, bytesWritten int, writeWait, timeToWrite time.Duration, readEnqueue time.Time, writeErr error) {
983983
// A nil ctx means we cannot be throttled.
984984
if ctx != nil {
985-
throttleUntil := time.Unix(0, atomic.LoadInt64(&cxn.throttleUntil))
985+
throttleUntil := time.Unix(0, cxn.throttleUntil.Load())
986986
if sleep := time.Until(throttleUntil); sleep > 0 {
987987
after := time.NewTimer(sleep)
988988
select {
@@ -1037,10 +1037,10 @@ func (cxn *brokerCxn) writeConn(
10371037
timeout time.Duration,
10381038
enqueuedForWritingAt time.Time,
10391039
) (bytesWritten int, writeWait, timeToWrite time.Duration, readEnqueue time.Time, writeErr error) {
1040-
atomic.SwapUint32(&cxn.writing, 1)
1040+
cxn.writing.Store(true)
10411041
defer func() {
1042-
atomic.StoreInt64(&cxn.lastWrite, time.Now().UnixNano())
1043-
atomic.SwapUint32(&cxn.writing, 0)
1042+
cxn.lastWrite.Store(time.Now().UnixNano())
1043+
cxn.writing.Store(false)
10441044
}()
10451045

10461046
if ctx == nil {
@@ -1085,10 +1085,10 @@ func (cxn *brokerCxn) readConn(
10851085
timeout time.Duration,
10861086
enqueuedForReadingAt time.Time,
10871087
) (nread int, buf []byte, readWait, timeToRead time.Duration, err error) {
1088-
atomic.SwapUint32(&cxn.reading, 1)
1088+
cxn.reading.Store(true)
10891089
defer func() {
1090-
atomic.StoreInt64(&cxn.lastRead, time.Now().UnixNano())
1091-
atomic.SwapUint32(&cxn.reading, 0)
1090+
cxn.lastRead.Store(time.Now().UnixNano())
1091+
cxn.reading.Store(false)
10921092
}()
10931093

10941094
if ctx == nil {
@@ -1256,7 +1256,7 @@ func (cxn *brokerCxn) closeConn() {
12561256
// die kills a broker connection (which could be dead already) and replies to
12571257
// all requests awaiting responses appropriately.
12581258
func (cxn *brokerCxn) die() {
1259-
if cxn == nil || atomic.SwapInt32(&cxn.dead, 1) == 1 {
1259+
if cxn == nil || cxn.dead.Swap(true) {
12601260
return
12611261
}
12621262
cxn.closeConn()
@@ -1364,10 +1364,10 @@ func (cxn *brokerCxn) discard() {
13641364
}
13651365
deadlineMu.Unlock()
13661366

1367-
atomic.SwapUint32(&cxn.reading, 1)
1367+
cxn.reading.Store(true)
13681368
defer func() {
1369-
atomic.StoreInt64(&cxn.lastRead, time.Now().UnixNano())
1370-
atomic.SwapUint32(&cxn.reading, 0)
1369+
cxn.lastRead.Store(time.Now().UnixNano())
1370+
cxn.reading.Store(false)
13711371
}()
13721372

13731373
readStart := time.Now()
@@ -1470,8 +1470,8 @@ func (cxn *brokerCxn) handleResp(pr promisedResp) {
14701470
if millis > 0 {
14711471
if throttlesAfterResp {
14721472
throttleUntil := time.Now().Add(time.Millisecond * time.Duration(millis)).UnixNano()
1473-
if throttleUntil > cxn.throttleUntil {
1474-
atomic.StoreInt64(&cxn.throttleUntil, throttleUntil)
1473+
if throttleUntil > cxn.throttleUntil.Load() {
1474+
cxn.throttleUntil.Store(throttleUntil)
14751475
}
14761476
}
14771477
cxn.cl.cfg.hooks.each(func(h Hook) {

pkg/kgo/consumer.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -152,7 +152,7 @@ func (o Offset) At(at int64) Offset {
152152
}
153153

154154
type consumer struct {
155-
bufferedRecords int64
155+
bufferedRecords atomicI64
156156

157157
cl *Client
158158

@@ -272,7 +272,7 @@ func (c *consumer) unaddRebalance() {
272272
// problematic if for you if this function is consistently returning large
273273
// values.
274274
func (cl *Client) BufferedFetchRecords() int64 {
275-
return atomic.LoadInt64(&cl.consumer.bufferedRecords)
275+
return cl.consumer.bufferedRecords.Load()
276276
}
277277

278278
type usedCursors map[*cursor]struct{}
@@ -1224,7 +1224,7 @@ type consumerSession struct {
12241224
desireFetchCh chan chan chan struct{}
12251225
cancelFetchCh chan chan chan struct{}
12261226
allowedFetches int
1227-
fetchManagerStarted uint32 // atomic, once 1, we start the fetch manager
1227+
fetchManagerStarted atomicBool // atomic, once true, we start the fetch manager
12281228

12291229
// Workers signify the number of fetch and list / epoch goroutines that
12301230
// are currently running within the context of this consumer session.
@@ -1278,7 +1278,7 @@ func (c *consumer) newConsumerSession(tps *topicsPartitions) *consumerSession {
12781278
}
12791279

12801280
func (s *consumerSession) desireFetch() chan chan chan struct{} {
1281-
if atomic.SwapUint32(&s.fetchManagerStarted, 1) == 0 {
1281+
if !s.fetchManagerStarted.Swap(true) {
12821282
go s.manageFetchConcurrency()
12831283
}
12841284
return s.desireFetchCh

pkg/kgo/go118.go

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,3 +24,34 @@ func (b *atomicBool) Swap(v bool) bool {
2424
}
2525
return atomic.SwapUint32((*uint32)(b), swap) == 1
2626
}
27+
28+
type atomicI32 int32
29+
30+
func (v *atomicI32) Add(s int32) int32 { return atomic.AddInt32((*int32)(v), s) }
31+
func (v *atomicI32) Store(s int32) { atomic.StoreInt32((*int32)(v), s) }
32+
func (v *atomicI32) Load() int32 { return atomic.LoadInt32((*int32)(v)) }
33+
func (v *atomicI32) Swap(s int32) int32 { return atomic.SwapInt32((*int32)(v), s) }
34+
35+
type atomicU32 uint32
36+
37+
func (v *atomicU32) Add(s uint32) uint32 { return atomic.AddUint32((*uint32)(v), s) }
38+
func (v *atomicU32) Store(s uint32) { atomic.StoreUint32((*uint32)(v), s) }
39+
func (v *atomicU32) Load() uint32 { return atomic.LoadUint32((*uint32)(v)) }
40+
func (v *atomicU32) Swap(s uint32) uint32 { return atomic.SwapUint32((*uint32)(v), s) }
41+
func (v *atomicU32) CompareAndSwap(old, new uint32) bool {
42+
return atomic.CompareAndSwapUint32((*uint32)(v), old, new)
43+
}
44+
45+
type atomicI64 int64
46+
47+
func (v *atomicI64) Add(s int64) int64 { return atomic.AddInt64((*int64)(v), s) }
48+
func (v *atomicI64) Store(s int64) { atomic.StoreInt64((*int64)(v), s) }
49+
func (v *atomicI64) Load() int64 { return atomic.LoadInt64((*int64)(v)) }
50+
func (v *atomicI64) Swap(s int64) int64 { return atomic.SwapInt64((*int64)(v), s) }
51+
52+
type atomicU64 uint64
53+
54+
func (v *atomicU64) Add(s uint64) uint64 { return atomic.AddUint64((*uint64)(v), s) }
55+
func (v *atomicU64) Store(s uint64) { atomic.StoreUint64((*uint64)(v), s) }
56+
func (v *atomicU64) Load() uint64 { return atomic.LoadUint64((*uint64)(v)) }
57+
func (v *atomicU64) Swap(s uint64) uint64 { return atomic.SwapUint64((*uint64)(v), s) }

pkg/kgo/go119.go

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,10 @@ package kgo
55

66
import "sync/atomic"
77

8-
type atomicBool struct {
9-
atomic.Bool
10-
}
8+
type (
9+
atomicBool struct{ atomic.Bool }
10+
atomicI32 struct{ atomic.Int32 }
11+
atomicU32 struct{ atomic.Uint32 }
12+
atomicI64 struct{ atomic.Int64 }
13+
atomicU64 struct{ atomic.Uint64 }
14+
)

pkg/kgo/group_test.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@ import (
66
"fmt"
77
"os"
88
"strconv"
9-
"sync/atomic"
109
"testing"
1110
"time"
1211
)
@@ -178,7 +177,7 @@ func (c *testConsumer) etl(etlsBeforeQuit int) {
178177
fetches := cl.PollRecords(ctx, 100)
179178
cancel()
180179
if fetches.Err() == context.DeadlineExceeded || fetches.Err() == ErrClientClosed {
181-
if consumed := int(atomic.LoadUint64(&c.consumed)); consumed == testRecordLimit {
180+
if consumed := int(c.consumed.Load()); consumed == testRecordLimit {
182181
return
183182
} else if consumed > testRecordLimit {
184183
panic(fmt.Sprintf("invalid: consumed too much from %s (group %s)", c.consumeFrom, c.group))
@@ -217,7 +216,7 @@ func (c *testConsumer) etl(etlsBeforeQuit int) {
217216

218217
c.mu.Unlock()
219218

220-
atomic.AddUint64(&c.consumed, 1)
219+
c.consumed.Add(1)
221220

222221
cl.Produce(
223222
context.Background(),

pkg/kgo/helpers_test.go

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ import (
1212
"strconv"
1313
"strings"
1414
"sync"
15-
"sync/atomic"
1615
"testing"
1716
"time"
1817

@@ -66,7 +65,7 @@ func getSeedBrokers() Opt {
6665
return SeedBrokers(strings.Split(seeds, ",")...)
6766
}
6867

69-
var loggerNum int64
68+
var loggerNum atomicI64
7069

7170
var testLogLevel = func() LogLevel {
7271
level := strings.ToLower(os.Getenv("KGO_LOG_LEVEL"))
@@ -80,7 +79,7 @@ var testLogLevel = func() LogLevel {
8079
}()
8180

8281
func testLogger() Logger {
83-
num := atomic.AddInt64(&loggerNum, 1)
82+
num := loggerNum.Add(1)
8483
pfx := strconv.Itoa(int(num))
8584
return BasicLogger(os.Stderr, testLogLevel, func() string {
8685
return time.Now().Format("[15:04:05 ") + pfx + "]"
@@ -193,7 +192,7 @@ type testConsumer struct {
193192

194193
expBody []byte // what every record body should be
195194

196-
consumed uint64 // shared atomically
195+
consumed atomicU64 // shared atomically
197196

198197
wg sync.WaitGroup
199198
mu sync.Mutex

pkg/kgo/partitioner.go

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ package kgo
33
import (
44
"math"
55
"math/rand"
6-
"sync/atomic"
76
"time"
87

98
"github.com/twmb/franz-go/pkg/kbin"
@@ -200,7 +199,7 @@ type (
200199

201200
func (i *leastBackupInput) Next() (int, int64) {
202201
last := len(i.mapping) - 1
203-
buffered := atomic.LoadInt64(&i.mapping[last].records.buffered)
202+
buffered := i.mapping[last].records.buffered.Load()
204203
i.mapping = i.mapping[:last]
205204
return last, buffered
206205
}

0 commit comments

Comments (0)