From 93f17d49a0aaf551c5de0ceea534ecc28cfebd1a Mon Sep 17 00:00:00 2001 From: Anton Bochkov Date: Fri, 24 Apr 2026 10:34:01 +0300 Subject: [PATCH 1/6] test: fix sync.Pool contract assumptions --- internal/backend/syncpool.go | 47 ++-- internal/backend/syncpool_test.go | 123 ++++++---- internal/testutil/runtime.go | 25 +- internal/testutil/runtime_test.go | 17 +- pool_test.go | 363 +++++++++++++----------------- 5 files changed, 286 insertions(+), 289 deletions(-) diff --git a/internal/backend/syncpool.go b/internal/backend/syncpool.go index 8ec3e04..4d029fc 100644 --- a/internal/backend/syncpool.go +++ b/internal/backend/syncpool.go @@ -36,16 +36,16 @@ import ( // creation, reset, reuse admission, and explicit drop handling. None of that // policy belongs in the low-level storage backend. This type exists to isolate // the backend-specific details of storing and retrieving reusable values via -// sync.Pool while keeping the public runtime small and policy-oriented. +// [sync.Pool] while keeping the public runtime small and policy-oriented. // // In practice, SyncPool is the boundary that converts an untyped any-based // backend into a typed generic backend suitable for the rest of the package. // It centralizes the only place where values cross the boundary between the -// generic public API and the underlying sync.Pool API. +// generic public API and the underlying [sync.Pool] API. // -// Why this type exists instead of using sync.Pool directly everywhere +// Why this type exists instead of using [sync.Pool] directly everywhere // -// - It keeps all sync.Pool-specific mechanics in one internal location. +// - It keeps all [sync.Pool]-specific mechanics in one internal location. // - It prevents the public pool runtime from repeating any <-> T conversion // logic. // - It makes future backend replacement or debug instrumentation local. @@ -60,12 +60,12 @@ import ( // - NewSyncPool requires a non-nil constructor. // - The zero value of SyncPool is not ready for use. 
// - SyncPool values must not be copied after first use because the type owns -// an embedded sync.Pool. +// an embedded [sync.Pool]. // // # Concurrency // // SyncPool is safe for concurrent use because it delegates storage and reuse -// mechanics to sync.Pool, which is itself designed for concurrent access. +// mechanics to [sync.Pool], which is itself designed for concurrent access. // // # Performance notes // @@ -86,29 +86,29 @@ import ( // // # Internal invariant // -// Every value stored in the underlying sync.Pool must have dynamic type T. +// Every value stored in the underlying [sync.Pool] must have dynamic type T. // Any violation of that invariant indicates an internal programming error in // this repository, not a recoverable user-space condition. // // The type is deliberately small and stable. If the project later adds debug // instrumentation, alternate backends, or build-tag-specific behaviour, those // changes should continue to preserve this file as the narrow typed bridge over -// sync.Pool. +// [sync.Pool]. type SyncPool[T any] struct { pool sync.Pool } -// NewSyncPool constructs a typed sync.Pool-backed backend. +// NewSyncPool constructs a typed [sync.Pool]-backed backend. // // newFn is mandatory. It is used as the slow-path constructor whenever the -// underlying sync.Pool cannot provide a reusable value. +// underlying [sync.Pool] cannot provide a reusable value. // // NewSyncPool panics if newFn is nil. Although higher layers already validate // this condition when resolving public Options, the backend defends its own // contract explicitly so that it remains correct and self-contained when read // or tested in isolation. // -// The constructor installed into sync.Pool.New must return values whose dynamic +// The constructor installed into [sync.Pool.New] must return values whose dynamic // type is exactly T. That invariant is required for Get to remain type-safe. 
func NewSyncPool[T any](newFn func() T) *SyncPool[T] { if newFn == nil { @@ -126,8 +126,8 @@ func NewSyncPool[T any](newFn func() T) *SyncPool[T] { // // Behaviour // -// - If sync.Pool has a previously stored value, that value is returned. -// - Otherwise sync.Pool invokes the constructor installed by NewSyncPool. +// - If [sync.Pool] has a previously stored value, that value is returned. +// - Otherwise [sync.Pool] invokes the constructor installed by NewSyncPool. // - The result is asserted back to T and returned to the caller. // // This method performs no lifecycle work beyond retrieval. In particular, Get @@ -146,12 +146,7 @@ func (p *SyncPool[T]) Get() T { panic("pool: Get called on nil SyncPool") } - value := p.pool.Get() - typed, ok := value.(T) - if !ok { - panic(unexpectedTypePanic[T](value)) - } - return typed + return typedPoolValue[T](p.pool.Get()) } // Put stores a value for future reuse. @@ -173,6 +168,20 @@ func (p *SyncPool[T]) Put(value T) { p.pool.Put(value) } +// typedPoolValue converts a raw backend value into the expected generic type. +// +// Keeping the assertion logic in a small helper makes the backend fast path +// readable and gives tests a deterministic boundary for impossible type-mismatch +// scenarios. The helper does not attempt recovery: a mismatched value means the +// internal backend invariant has already been violated. +func typedPoolValue[T any](value any) T { + typed, ok := value.(T) + if !ok { + panic(unexpectedTypePanic[T](value)) + } + return typed +} + // unexpectedTypePanic formats a stable panic message for impossible backend // type mismatches. // diff --git a/internal/backend/syncpool_test.go b/internal/backend/syncpool_test.go index 1cf712a..9a6791d 100644 --- a/internal/backend/syncpool_test.go +++ b/internal/backend/syncpool_test.go @@ -25,7 +25,8 @@ import ( // syncPoolTestObject is intentionally compact. 
// // The backend contract under test is about type preservation, constructor -// behaviour, and round-trip reuse semantics. A small object keeps the tests +// behaviour, and reuse eligibility rather than stable same-instance retention. +// A small object keeps the tests // focused on those invariants instead of unrelated domain behaviour. type syncPoolTestObject struct { ID int @@ -72,7 +73,7 @@ func TestNewSyncPool(t *testing.T) { }) } -func TestSyncPoolGetPutRoundTrip(t *testing.T) { +func TestSyncPoolGetAfterPut(t *testing.T) { t.Run("pointer type", func(t *testing.T) { testutil.WithControlledSteadyStatePoolRoundTrip(t, func() { calls := 0 @@ -85,12 +86,7 @@ func TestSyncPoolGetPutRoundTrip(t *testing.T) { pool.Put(stored) got := pool.Get() - if got != stored { - t.Fatalf("Get() returned pointer %p after Put(), want %p", got, stored) - } - if calls != 0 { - t.Fatalf("constructor call count after pointer Put()/Get() round-trip = %d, want 0", calls) - } + assertPointerGetAfterPut(t, got, stored, calls) }) }) @@ -106,12 +102,7 @@ func TestSyncPoolGetPutRoundTrip(t *testing.T) { pool.Put(stored) got := pool.Get() - if got != stored { - t.Fatalf("Get() returned value %+v after Put(), want %+v", got, stored) - } - if calls != 0 { - t.Fatalf("constructor call count after value Put()/Get() round-trip = %d, want 0", calls) - } + assertValueGetAfterPut(t, got, stored, calls) }) }) } @@ -128,19 +119,29 @@ func TestSyncPoolTypedNilHandling(t *testing.T) { } }) - t.Run("backend may round-trip typed nil pointer", func(t *testing.T) { + t.Run("stored typed nil pointer does not break backend type safety", func(t *testing.T) { testutil.WithControlledSteadyStatePoolRoundTrip(t, func() { + calls := 0 pool := NewSyncPool(func() *syncPoolTestObject { - t.Fatal("constructor must not be called when a typed nil pointer was stored explicitly") - return &syncPoolTestObject{} + calls++ + return &syncPoolTestObject{ID: calls} }) var stored *syncPoolTestObject pool.Put(stored) got := 
pool.Get() - if got != nil { - t.Fatalf("Get() result after storing typed nil pointer = %v, want nil", got) + switch { + case got == nil: + if calls != 0 { + t.Fatalf("constructor call count after nil pointer round-trip = %d, want 0 when stored nil was reused", calls) + } + case got.ID == 1: + if calls != 1 { + t.Fatalf("constructor call count after nil pointer fallback = %d, want 1", calls) + } + default: + t.Fatalf("Get() result after storing typed nil pointer = %+v, want nil or fresh constructor value", got) } }) }) @@ -174,40 +175,42 @@ func TestSyncPoolNilReceiverPanics(t *testing.T) { }) } -func TestSyncPoolGetPanicsOnUnexpectedStoredType(t *testing.T) { - t.Run("wrong non-nil dynamic type", func(t *testing.T) { - pool := NewSyncPool(func() *syncPoolTestObject { - return &syncPoolTestObject{} - }) +func TestTypedPoolValue(t *testing.T) { + t.Run("matching value type", func(t *testing.T) { + got := typedPoolValue[syncPoolTestObject](syncPoolTestObject{ID: 42}) + if got != (syncPoolTestObject{ID: 42}) { + t.Fatalf("typedPoolValue[syncPoolTestObject](...) = %+v, want %+v", got, syncPoolTestObject{ID: 42}) + } + }) - // This intentionally bypasses the typed API to verify the internal - // invariant: every value stored in the embedded sync.Pool must still - // have dynamic type T when read back. 
- pool.pool.Put(1) + t.Run("matching typed nil pointer", func(t *testing.T) { + var want *syncPoolTestObject + got := typedPoolValue[*syncPoolTestObject](want) + if got != nil { + t.Fatalf("typedPoolValue[*syncPoolTestObject](typed nil) = %v, want nil", got) + } + }) +} +func TestTypedPoolValuePanicsOnUnexpectedType(t *testing.T) { + t.Run("wrong non-nil dynamic type", func(t *testing.T) { testutil.AssertPanicMessage( t, - "Get() with scalar value of wrong type stored in embedded sync.Pool", + "typedPoolValue[*syncPoolTestObject](1)", func() { - _ = pool.Get() + _ = typedPoolValue[*syncPoolTestObject](1) }, unexpectedTypePanic[*syncPoolTestObject](1), ) }) t.Run("wrong typed nil dynamic type", func(t *testing.T) { - pool := NewSyncPool(func() *syncPoolTestObject { - return &syncPoolTestObject{} - }) - var wrong *syncPoolOtherObject - pool.pool.Put(wrong) - testutil.AssertPanicMessage( t, - "Get() with typed nil pointer of wrong type stored in embedded sync.Pool", + "typedPoolValue[*syncPoolTestObject](wrong)", func() { - _ = pool.Get() + _ = typedPoolValue[*syncPoolTestObject](wrong) }, unexpectedTypePanic[*syncPoolTestObject](wrong), ) @@ -231,3 +234,47 @@ func TestUnexpectedTypePanic(t *testing.T) { } }) } + +func assertPointerGetAfterPut( + t *testing.T, + got *syncPoolTestObject, + stored *syncPoolTestObject, + constructorCalls int, +) { + t.Helper() + + switch { + case got == stored: + if constructorCalls != 0 { + t.Fatalf("constructor call count after pointer reuse = %d, want 0", constructorCalls) + } + case got != nil && got.ID == 1: + if constructorCalls != 1 { + t.Fatalf("constructor call count after pointer fallback construction = %d, want 1", constructorCalls) + } + default: + t.Fatalf("Get() after pointer Put() = %+v, want stored pointer or fresh constructor value", got) + } +} + +func assertValueGetAfterPut( + t *testing.T, + got syncPoolTestObject, + stored syncPoolTestObject, + constructorCalls int, +) { + t.Helper() + + switch { + case got == 
stored: + if constructorCalls != 0 { + t.Fatalf("constructor call count after value reuse = %d, want 0", constructorCalls) + } + case got == (syncPoolTestObject{ID: 1}): + if constructorCalls != 1 { + t.Fatalf("constructor call count after value fallback construction = %d, want 1", constructorCalls) + } + default: + t.Fatalf("Get() after value Put() = %+v, want stored value or fresh constructor value", got) + } +} diff --git a/internal/testutil/runtime.go b/internal/testutil/runtime.go index a21f2e6..e45b39f 100644 --- a/internal/testutil/runtime.go +++ b/internal/testutil/runtime.go @@ -26,7 +26,7 @@ import ( // setting before returning. // // The helper is intended for tests and benchmarks that need deterministic -// single-P behaviour, especially around sync.Pool local-cache semantics. The +// single-P behaviour, especially around [sync.Pool] local-cache semantics. The // restoration is scoped to the callback itself rather than deferred to test // cleanup so later assertions in the same test see the original runtime state. func WithSingleP(tb testing.TB, fn func()) { @@ -42,7 +42,7 @@ func WithSingleP(tb testing.TB, fn func()) { // GC target before returning. // // This helper is useful when a test or benchmark needs to prevent transient GC -// cycles from discarding sync.Pool state between tightly-coupled Put/Get steps. +// cycles from discarding [sync.Pool] state between tightly-coupled Put/Get steps. // As with WithSingleP, restoration happens immediately after fn returns so the // helper composes safely inside larger tests. func WithGCDisabled(tb testing.TB, fn func()) { @@ -54,18 +54,19 @@ func WithGCDisabled(tb testing.TB, fn func()) { fn() } -// WithControlledSteadyStatePoolRoundTrip makes immediate Put/Get assertions -// deterministic enough for controlled tests and benchmarks that rely on -// sync.Pool local-cache reuse. +// WithControlledSteadyStatePoolRoundTrip scopes a callback to a quieter +// runtime state for pool-focused tests and benchmarks. 
// -// Pinning execution to one P avoids per-P handoff surprises, and disabling GC -// prevents cached values from being discarded between Put and Get. The helper -// restores both runtime settings before it returns to the caller. +// Pinning execution to one P avoids per-P handoff noise, and disabling GC +// reduces the chance that cached values disappear between tightly coupled pool +// operations. The helper restores both runtime settings before it returns to +// the caller. // -// This helper intentionally creates an idealized local steady-state execution -// mode. It is appropriate for controlled hot path measurements and for unit -// tests that need deterministic immediate reuse. It MUST NOT be described as a -// general runtime mode in performance reports. +// Despite the historical name, this helper does not turn exact same-instance +// reuse into a valid contract. Tests must still avoid asserting that a raw +// [sync.Pool]-backed path returns the exact value that was just stored. Use the +// helper only to reduce scheduler and GC noise around controlled scenarios, not +// to claim deterministic retention semantics. func WithControlledSteadyStatePoolRoundTrip(tb testing.TB, fn func()) { tb.Helper() diff --git a/internal/testutil/runtime_test.go b/internal/testutil/runtime_test.go index 010fea5..a7ad33c 100644 --- a/internal/testutil/runtime_test.go +++ b/internal/testutil/runtime_test.go @@ -19,7 +19,6 @@ package testutil import ( "runtime" "runtime/debug" - "sync" "testing" ) @@ -106,21 +105,11 @@ func TestWithControlledSteadyStatePoolRoundTrip(t *testing.T) { WithControlledSteadyStatePoolRoundTrip(t, func() { // The combined helper should apply both single-P execution and GC - // suppression at once because backend round-trip tests rely on both. + // suppression at once. 
The helper deliberately constrains runtime noise; + // it does not upgrade raw [sync.Pool] reuse into a same-instance + // guarantee, so this test checks only the runtime controls themselves. insideP = runtime.GOMAXPROCS(0) insideGC = debug.SetGCPercent(250) - - var pool sync.Pool - stored := &struct{ ID int }{ID: 42} - pool.Put(stored) - - // A direct sync.Pool immediate round trip is the behavioural reason this - // helper exists. If this ever stops working under the scoped runtime - // changes, backend tests and benchmarks lose their stability contract. - got := pool.Get() - if got != stored { - t.Fatalf("sync.Pool round-trip inside WithControlledSteadyStatePoolRoundTrip returned %v, want %v", got, stored) - } }) if insideP != 1 { diff --git a/pool_test.go b/pool_test.go index 691bccf..bc3a32d 100644 --- a/pool_test.go +++ b/pool_test.go @@ -49,6 +49,11 @@ type poolHookCalls struct { drop int } +type poolTestValue struct { + ID int + Dirty bool +} + func TestNew(t *testing.T) { t.Run("panics when Options.New is nil", func(t *testing.T) { testutil.AssertPanicMessage( @@ -112,220 +117,132 @@ func TestPoolGet(t *testing.T) { }) } -func TestPoolPut(t *testing.T) { - t.Run("accepted pointer values are reset before reuse", func(t *testing.T) { - testutil.WithControlledSteadyStatePoolRoundTrip(t, func() { - calls := poolHookCalls{} - events := make([]string, 0, 2) - pool := New(Options[*poolTestObject]{ - New: func() *poolTestObject { - calls.new++ - return &poolTestObject{ - ID: calls.new, - State: "fresh", - Payload: make([]byte, 0, 64), - } - }, - Reset: func(v *poolTestObject) { - calls.reset++ - events = append(events, "reset") - v.State = "clean" - v.Flag = false - v.Payload = v.Payload[:0] - v.ResetSeen = true - }, - Reuse: func(v *poolTestObject) bool { - calls.reuse++ - events = append(events, fmt.Sprintf("reuse:%s", v.State)) - return cap(v.Payload) <= 64 - }, - OnDrop: func(v *poolTestObject) { - calls.drop++ - events = append(events, "drop") - v.DropSeen = 
true - }, - }) - - first := pool.Get() - first.State = "dirty" - first.Flag = true - first.Payload = append(first.Payload, []byte("dirty-state")...) - - pool.Put(first) - - second := pool.Get() - - testutil.AssertEventSequence( - t, - "accepted pointer Put() path", - events, - []string{"reuse:dirty", "reset"}, - ) - if second != first { - t.Fatalf("Get() after accepted Put() returned pointer %p, want original pointer %p", second, first) - } - if second.State != "clean" { - t.Fatalf("reused object state = %q, want %q", second.State, "clean") - } - if second.Flag { - t.Fatal("reused object retained dirty Flag, want false after reset") - } - if len(second.Payload) != 0 { - t.Fatalf("reused object payload length = %d, want 0 after reset", len(second.Payload)) - } - if !second.ResetSeen { - t.Fatal("reused object does not show reset marker, want reset hook to have run") - } - if second.DropSeen { - t.Fatal("accepted object was marked as dropped, want drop hook to remain untouched") - } - if calls != (poolHookCalls{new: 1, reset: 1, reuse: 1}) { - t.Fatalf("hook calls after accepted pointer round-trip = %+v, want new=1 reset=1 reuse=1 drop=0", calls) - } - }) - }) +func TestPoolPutAcceptedPointerValue(t *testing.T) { + calls, events, pool := newPointerPutTestPool(64) + value := &poolTestObject{ + ID: 42, + State: "dirty", + Flag: true, + Payload: append(make([]byte, 0, 64), []byte("dirty-state")...), + } - t.Run("rejected pointer values are dropped without reset or reuse", func(t *testing.T) { - testutil.WithControlledSteadyStatePoolRoundTrip(t, func() { - calls := poolHookCalls{} - events := make([]string, 0, 2) - pool := New(Options[*poolTestObject]{ - New: func() *poolTestObject { - calls.new++ - return &poolTestObject{ - ID: calls.new, - State: "fresh", - Payload: make([]byte, 0, 8), - } - }, - Reset: func(v *poolTestObject) { - calls.reset++ - events = append(events, "reset") - v.State = "clean" - v.Payload = v.Payload[:0] - }, - Reuse: func(v *poolTestObject) bool 
{ - calls.reuse++ - events = append(events, fmt.Sprintf("reuse:%s", v.State)) - return cap(v.Payload) <= 4 - }, - OnDrop: func(v *poolTestObject) { - calls.drop++ - events = append(events, fmt.Sprintf("drop:%s", v.State)) - v.DropSeen = true - }, - }) - - first := pool.Get() - first.State = "oversized" - first.Payload = append(first.Payload, []byte("01234567")...) - - pool.Put(first) - - second := pool.Get() - - testutil.AssertEventSequence( - t, - "rejected pointer Put() path", - events, - []string{"reuse:oversized", "drop:oversized"}, - ) - if second == first { - t.Fatalf("Get() reused rejected pointer %p, want newly constructed instance", second) - } - if second.ID != 2 { - t.Fatalf("Get() after rejected Put() returned ID %d, want 2 from fresh construction", second.ID) - } - if first.State != "oversized" { - t.Fatalf("rejected object state = %q, want %q (reset must not run)", first.State, "oversized") - } - if len(first.Payload) != 8 { - t.Fatalf("rejected object payload length = %d, want 8 (reset must not run)", len(first.Payload)) - } - if !first.DropSeen { - t.Fatal("rejected object was not marked as dropped, want drop hook to run") - } - if calls != (poolHookCalls{new: 2, reuse: 1, drop: 1}) { - t.Fatalf("hook calls after rejected pointer round-trip = %+v, want new=2 reset=0 reuse=1 drop=1", calls) - } - }) - }) + pool.Put(value) - t.Run("value-typed pools follow the same reuse and drop contract", func(t *testing.T) { - testutil.WithControlledSteadyStatePoolRoundTrip(t, func() { - type value struct { - ID int - Dirty bool - } + testutil.AssertEventSequence( + t, + "accepted pointer Put() path", + *events, + []string{"reuse:dirty", "reset"}, + ) + if value.State != "clean" { + t.Fatalf("accepted pointer state after Put() = %q, want %q", value.State, "clean") + } + if value.Flag { + t.Fatal("accepted pointer retained dirty Flag after Put(), want false") + } + if len(value.Payload) != 0 { + t.Fatalf("accepted pointer payload length after Put() = %d, want 0", 
len(value.Payload)) + } + if !value.ResetSeen { + t.Fatal("accepted pointer does not show reset marker after Put()") + } + if value.DropSeen { + t.Fatal("accepted pointer was marked as dropped, want drop hook to remain idle") + } + if *calls != (poolHookCalls{reset: 1, reuse: 1}) { + t.Fatalf("hook calls for accepted pointer Put() = %+v, want reset=1 reuse=1", *calls) + } +} - calls := poolHookCalls{} - events := make([]string, 0, 4) - pool := New(Options[value]{ - New: func() value { - calls.new++ - return value{ID: calls.new} - }, - Reset: func(v value) { - calls.reset++ - events = append(events, fmt.Sprintf("reset:%d", v.ID)) - // Value-typed T is copied into ResetFunc. The public contract we - // care about here is that reset still runs on accepted values and - // drop still runs on rejected ones. - }, - Reuse: func(v value) bool { - calls.reuse++ - events = append(events, fmt.Sprintf("reuse:%d:%t", v.ID, v.Dirty)) - return !v.Dirty - }, - OnDrop: func(v value) { - calls.drop++ - events = append(events, fmt.Sprintf("drop:%d", v.ID)) - }, - }) - - first := pool.Get() - if first.ID != 1 { - t.Fatalf("first Get() value ID = %d, want 1", first.ID) - } - pool.Put(first) +func TestPoolPutRejectedPointerValue(t *testing.T) { + calls, events, pool := newPointerPutTestPool(4) + value := &poolTestObject{ + ID: 42, + State: "oversized", + Payload: append(make([]byte, 0, 8), []byte("01234567")...), + } - second := pool.Get() - if second.ID != 1 { - t.Fatalf("Get() after accepted value Put() returned ID %d, want 1", second.ID) - } + pool.Put(value) - second.Dirty = true - pool.Put(second) + testutil.AssertEventSequence( + t, + "rejected pointer Put() path", + *events, + []string{"reuse:oversized", "drop:oversized"}, + ) + if value.State != "oversized" { + t.Fatalf("rejected pointer state after Put() = %q, want %q", value.State, "oversized") + } + if len(value.Payload) != 8 { + t.Fatalf("rejected pointer payload length after Put() = %d, want 8", len(value.Payload)) + } + if 
!value.DropSeen { + t.Fatal("rejected pointer was not marked as dropped") + } + if value.ResetSeen { + t.Fatal("rejected pointer was marked as reset, want reset hook to stay idle") + } + if *calls != (poolHookCalls{reuse: 1, drop: 1}) { + t.Fatalf("hook calls for rejected pointer Put() = %+v, want reuse=1 drop=1", *calls) + } - third := pool.Get() + got := pool.Get() + if got.ID != 1 { + t.Fatalf("Get() after rejected Put() returned ID %d, want 1 from fresh construction", got.ID) + } + if *calls != (poolHookCalls{new: 1, reuse: 1, drop: 1}) { + t.Fatalf("hook calls after rejected pointer Put()/Get() = %+v, want new=1 reuse=1 drop=1", *calls) + } +} - testutil.AssertEventSequence( - t, - "value-typed Put() paths", - events, - []string{"reuse:1:false", "reset:1", "reuse:1:true", "drop:1"}, - ) - if third.ID != 2 { - t.Fatalf("Get() after rejected dirty value returned ID %d, want 2", third.ID) - } - if calls != (poolHookCalls{new: 2, reset: 1, reuse: 2, drop: 1}) { - t.Fatalf("hook calls for value-typed paths = %+v, want new=2 reset=1 reuse=2 drop=1", calls) - } - }) +func TestPoolPutValueTypeHonorsReuseAndDropPolicy(t *testing.T) { + calls := poolHookCalls{} + events := make([]string, 0, 4) + pool := New(Options[poolTestValue]{ + New: func() poolTestValue { + calls.new++ + return poolTestValue{ID: calls.new} + }, + Reset: func(v poolTestValue) { + calls.reset++ + events = append(events, fmt.Sprintf("reset:%d", v.ID)) + }, + Reuse: func(v poolTestValue) bool { + calls.reuse++ + events = append(events, fmt.Sprintf("reuse:%d:%t", v.ID, v.Dirty)) + return !v.Dirty + }, + OnDrop: func(v poolTestValue) { + calls.drop++ + events = append(events, fmt.Sprintf("drop:%d", v.ID)) + }, }) - t.Run("panics on nil receiver", func(t *testing.T) { - var pool *Pool[*poolTestObject] + pool.Put(poolTestValue{ID: 41}) + pool.Put(poolTestValue{ID: 42, Dirty: true}) - testutil.AssertPanicMessage( - t, - "(*Pool[*poolTestObject])(nil).Put(nil)", - func() { - pool.Put(nil) - }, - "pool: Put 
called on nil Pool", - ) - }) + testutil.AssertEventSequence( + t, + "value-typed Put() paths", + events, + []string{"reuse:41:false", "reset:41", "reuse:42:true", "drop:42"}, + ) + if calls != (poolHookCalls{reset: 1, reuse: 2, drop: 1}) { + t.Fatalf("hook calls for value-typed Put() paths = %+v, want reset=1 reuse=2 drop=1", calls) + } +} + +func TestPoolPutNilReceiverPanics(t *testing.T) { + var pool *Pool[*poolTestObject] + + testutil.AssertPanicMessage( + t, + "(*Pool[*poolTestObject])(nil).Put(nil)", + func() { + pool.Put(nil) + }, + "pool: Put called on nil Pool", + ) } func TestPoolConcurrentGetPut(t *testing.T) { @@ -354,11 +271,11 @@ func TestPoolConcurrentGetPut(t *testing.T) { var wg sync.WaitGroup wg.Add(goroutines) - for i := 0; i < goroutines; i++ { + for range goroutines { go func() { defer wg.Done() - for j := 0; j < iterations; j++ { + for range iterations { object := pool.Get() if object == nil { t.Error("Get() returned nil object during concurrent use") @@ -390,3 +307,37 @@ func TestPoolConcurrentGetPut(t *testing.T) { } pool.Put(object) } + +func newPointerPutTestPool(reusePayloadCap int) (*poolHookCalls, *[]string, *Pool[*poolTestObject]) { + calls := &poolHookCalls{} + events := &[]string{} + pool := New(Options[*poolTestObject]{ + New: func() *poolTestObject { + calls.new++ + return &poolTestObject{ + ID: calls.new, + State: "fresh", + Payload: make([]byte, 0, 64), + } + }, + Reset: func(v *poolTestObject) { + calls.reset++ + *events = append(*events, "reset") + v.State = "clean" + v.Flag = false + v.Payload = v.Payload[:0] + v.ResetSeen = true + }, + Reuse: func(v *poolTestObject) bool { + calls.reuse++ + *events = append(*events, fmt.Sprintf("reuse:%s", v.State)) + return cap(v.Payload) <= reusePayloadCap + }, + OnDrop: func(v *poolTestObject) { + calls.drop++ + *events = append(*events, fmt.Sprintf("drop:%s", v.State)) + v.DropSeen = true + }, + }) + return calls, events, pool +} From 5310b411387b29d0da5cf3a8d4d3973cfac0bd2d Mon Sep 17 
00:00:00 2001 From: Anton Bochkov Date: Fri, 24 Apr 2026 10:34:15 +0300 Subject: [PATCH 2/6] chore(lint): refine benchmark and test policy --- .golangci.yml | 11 + doc.go | 18 +- internal/backend/syncpool_benchmark_test.go | 20 +- internal/testutil/assertions.go | 2 +- internal/testutil/benchmark.go | 4 +- lifecycle.go | 4 +- options.go | 4 +- options_test.go | 401 ++++++++++---------- pool.go | 8 +- pool_baseline_benchmark_test.go | 21 +- pool_parallel_benchmark_test.go | 8 +- pool_paths_benchmark_test.go | 2 +- pool_shapes_benchmark_test.go | 2 +- 13 files changed, 260 insertions(+), 245 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e72fd09..37caf96 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -487,6 +487,10 @@ linters: - text: 'comment on exported \S+ \S+ should be of the form ".+"' source: '// ?(nolint|TODO)' linters: [ revive, staticcheck ] + # The repository intentionally mixes black-box API tests with in-package + # white-box tests and benchmarks that need unexported lifecycle/backend + # access. Enforcing external-only test packages would weaken coverage + # more than it would improve structure here. - path: '_test\.go' linters: - bodyclose @@ -496,4 +500,11 @@ linters: - goconst - gosec - noctx + - testpackage - wrapcheck + # Benchmark sink globals and shared benchmark payload fixtures are + # deliberate: they prevent compiler elimination and keep benchmark source + # material allocation-free across iterations. + - path: '(_benchmark_test\.go|internal/testutil/payload\.go)' + linters: + - gochecknoglobals diff --git a/doc.go b/doc.go index 6bdfe84..009fc2e 100644 --- a/doc.go +++ b/doc.go @@ -38,7 +38,7 @@ // p.Put(v) // // This package is therefore best understood as a typed, policy-driven runtime -// layered over sync.Pool-style temporary reuse rather than as a comprehensive +// layered over [sync.Pool]-style temporary reuse rather than as a comprehensive // resource manager. // // # Design model @@ -47,7 +47,7 @@ // // 1. 
[Options], which describes lifecycle policy declaratively; // 2. [Pool], which exposes the public Get/Put runtime API; -// 3. an internal backend, currently implemented on top of sync.Pool, which +// 3. an internal backend, currently implemented on top of [sync.Pool], which // provides low-level storage and retrieval. // // These layers intentionally separate concerns: @@ -176,14 +176,14 @@ // // [DropFunc] is best-effort observation of explicit rejection-by-policy. It is // not a finalizer, and it is not guaranteed to run for every value that ever -// becomes unreachable. Once a value has been accepted into a sync.Pool-style +// becomes unreachable. Once a value has been accepted into a [sync.Pool]-style // backend, that backend may later discard it without further notification. // // # Concurrency model // // A [Pool] is safe for concurrent use by multiple goroutines. Its lifecycle // policy becomes immutable after construction, and backend storage is delegated -// to an internal sync.Pool-backed adapter. +// to an internal [sync.Pool]-backed adapter. // // However, the borrowed value returned by Get belongs to one logical owner at a // time. The package does not make the borrowed value itself concurrency-safe. @@ -222,15 +222,15 @@ // - avoid copying large mutable state; // - align naturally with object-pooling usage patterns in Go; // - make ownership easier to reason about; -// - usually interact better with sync.Pool-style reuse than large value +// - usually interact better with [sync.Pool]-style reuse than large value // objects do. // // Value types are supported, but they should be chosen deliberately rather than // by accident. // -// # Relationship to sync.Pool +// # Relationship to [sync.Pool] // -// The package intentionally follows the temporary-object model of sync.Pool. +// The package intentionally follows the temporary-object model of [sync.Pool]. // It does not attempt to hide that heritage. 
// // In particular, callers should assume the same broad operational shape: @@ -241,7 +241,7 @@ // - the package is designed for reducing allocation pressure for temporary // values, not for maintaining a durable reserve of objects. // -// What this package adds on top of sync.Pool is not a different storage model, +// What this package adds on top of [sync.Pool] is not a different storage model, // but a better public runtime contract: // // - typed access through [Pool[T]]; @@ -262,7 +262,7 @@ // - generic "resource management" abstractions beyond temporary value reuse. // // If a caller needs those features, it likely needs a different kind of -// system than a sync.Pool-style temporary object reuse runtime. +// system than a [sync.Pool]-style temporary object reuse runtime. // // # Internal structure // diff --git a/internal/backend/syncpool_benchmark_test.go b/internal/backend/syncpool_benchmark_test.go index 0d4de9e..eb49707 100644 --- a/internal/backend/syncpool_benchmark_test.go +++ b/internal/backend/syncpool_benchmark_test.go @@ -24,7 +24,7 @@ import ( ) // The backend benchmark suite exists to measure only the low-level typed -// adapter over sync.Pool. +// adapter over [sync.Pool]. // // These benchmarks intentionally avoid lifecycle policy concerns such as: // - reset cost; @@ -79,11 +79,19 @@ var ( syncPoolBenchmarkValueSink syncPoolBenchmarkValue ) +func putSyncPoolBenchmarkValue( + p *SyncPool[syncPoolBenchmarkValue], + v syncPoolBenchmarkValue, +) { + //nolint:staticcheck // This benchmark intentionally measures the by-value backend contrast path. + p.Put(v) +} + // BenchmarkSyncPool_GetMiss measures the pure backend miss path. // // The benchmark deliberately never returns values to the backend. As a result, // every Get call must fall through to the constructor installed in -// sync.Pool.New. +// [sync.Pool.New]. 
// // This benchmark answers two questions: // - what is the cost of a constructor miss at the backend layer; @@ -172,7 +180,9 @@ func BenchmarkSyncPool_ControlledGetPut_Value(b *testing.B) { return syncPoolBenchmarkValue{} }) - testutil.PrimePoolValue(p.Get, p.Put) + testutil.PrimePoolValue(p.Get, func(v syncPoolBenchmarkValue) { + putSyncPoolBenchmarkValue(p, v) + }) news = 0 b.ReportAllocs() @@ -182,7 +192,7 @@ func BenchmarkSyncPool_ControlledGetPut_Value(b *testing.B) { v := p.Get() v.A = uint64(i) v.B = uint64(i * 2) - p.Put(v) + putSyncPoolBenchmarkValue(p, v) syncPoolBenchmarkValueSink = v } @@ -203,7 +213,7 @@ func BenchmarkSyncPool_ControlledGetPut_Value(b *testing.B) { // - pure round-trip cost on already reusable values. // // A small prefill step is performed before timing begins. The goal is not to -// guarantee zero misses across all Ps, which sync.Pool does not promise, but to +// guarantee zero misses across all Ps, which [sync.Pool] does not promise, but to // avoid measuring an entirely cold backend when the question is concurrent // steady-state behaviour. func BenchmarkSyncPool_RealisticParallel(b *testing.B) { diff --git a/internal/testutil/assertions.go b/internal/testutil/assertions.go index 506c5ea..74c662e 100644 --- a/internal/testutil/assertions.go +++ b/internal/testutil/assertions.go @@ -44,7 +44,7 @@ func AssertPanicMessage(tb testing.TB, scenario string, fn func(), want string) } // MustPanic runs fn and returns the recovered panic value formatted with -// fmt.Sprint. +// [fmt.Sprint]. // // If fn does not panic, the helper fails the test immediately. 
This keeps // panic-based contract checks concise while preserving the scenario name in the diff --git a/internal/testutil/benchmark.go b/internal/testutil/benchmark.go index 0e5e375..b1819f1 100644 --- a/internal/testutil/benchmark.go +++ b/internal/testutil/benchmark.go @@ -35,7 +35,7 @@ func PrimePoolValue[T any](get func() T, put func(T)) { // Parallel benchmarks use this helper to avoid measuring an entirely cold pool // when the question is concurrent steady-state behaviour. func PrefillPool[T any](count int, newFn func() T, put func(T)) { - for i := 0; i < count; i++ { + for range count { put(newFn()) } } @@ -44,7 +44,7 @@ func PrefillPool[T any](count int, newFn func() T, put func(T)) { // pool benchmarks. // // The value scales with the current GOMAXPROCS setting so the prefill step is -// proportional to the number of active Ps that may participate in sync.Pool +// proportional to the number of active Ps that may participate in [sync.Pool] // local-cache distribution. func ParallelWarmCount() int { return runtime.GOMAXPROCS(0) * parallelWarmFactor diff --git a/lifecycle.go b/lifecycle.go index eecaa48..bf7d83e 100644 --- a/lifecycle.go +++ b/lifecycle.go @@ -35,7 +35,7 @@ package pool // // Keeping the sink contract this small has several benefits: // -// - lifecycle code remains independent of sync.Pool-specific details; +// - lifecycle code remains independent of [sync.Pool]-specific details; // - the public runtime can be structured as policy above, backend below; // - alternate internal backends or debug wrappers can be introduced without // rewriting lifecycle semantics; @@ -207,7 +207,7 @@ func (l lifecycle[T]) ResetForReuse(value T) { // // The method intentionally does not guarantee that every ultimately discarded // object in the system will pass through this code path. 
Once a value has been -// accepted into a sync.Pool-style backend, that backend may later release it +// accepted into a [sync.Pool]-style backend, that backend may later release it // without further callbacks. ObserveDrop therefore represents explicit // rejection-by-policy, not a universal finalizer mechanism. func (l lifecycle[T]) ObserveDrop(value T) { diff --git a/options.go b/options.go index 50e05d1..78f9111 100644 --- a/options.go +++ b/options.go @@ -163,7 +163,7 @@ type Options[T any] struct { // For most real-world use cases, T SHOULD be a pointer-like type such as // *MyState, *RequestContext, or *Builder. Pointer-like values are typically the // best fit for object pooling because they avoid copying large mutable state -// and align well with the intended usage pattern of sync.Pool-backed designs. +// and align well with the intended usage pattern of [sync.Pool]-backed designs. // // Example: // @@ -282,7 +282,7 @@ type ReuseFunc[T any] func(T) bool // // DropFunc MUST be treated as a best-effort callback. It is not a lifecycle // guarantee that a value will always pass through this hook before becoming -// unreachable. In particular, values obtained from a sync.Pool-style backend +// unreachable. In particular, values obtained from a [sync.Pool]-style backend // may disappear without an explicit callback once stored in the backend. // // DropFunc SHOULD remain lightweight. 
Heavy logging, allocation, blocking I/O, diff --git a/options_test.go b/options_test.go index 1bcf9a9..adb4f44 100644 --- a/options_test.go +++ b/options_test.go @@ -52,222 +52,211 @@ type optionsTestValue struct { N int } -func TestOptionsResolve(t *testing.T) { - t.Run("panics when New is nil", func(t *testing.T) { - testutil.AssertPanicMessage( - t, - "Options.resolve() with nil New", - func() { - var opts Options[*optionsTestObject] - _ = opts.resolve() - }, - "pool: Options.New must not be nil", - ) - }) - - t.Run("does not invoke hooks during resolution", func(t *testing.T) { - calls := optionsHookCalls{} - opts := Options[*optionsTestObject]{ - New: func() *optionsTestObject { - calls.new++ - return &optionsTestObject{} - }, - Reset: func(*optionsTestObject) { - calls.reset++ - }, - Reuse: func(*optionsTestObject) bool { - calls.reuse++ - return true - }, - OnDrop: func(*optionsTestObject) { - calls.drop++ - }, - } - - _ = opts.resolve() - - if calls != (optionsHookCalls{}) { - t.Fatalf("resolve() invoked hooks eagerly: got %+v, want all counters to remain zero", calls) - } - }) - - t.Run("installs defaults for value type", func(t *testing.T) { - resolved := Options[optionsTestValue]{ - New: func() optionsTestValue { - return optionsTestValue{N: 41} - }, - }.resolve() - - if got := resolved.newFn(); got != (optionsTestValue{N: 41}) { - t.Fatalf("resolved newFn() = %+v, want %+v", got, optionsTestValue{N: 41}) - } - - value := optionsTestValue{N: 99} - resolved.resetFn(value) - if value != (optionsTestValue{N: 99}) { - t.Fatalf("default resetFn mutated value-typed input: got %+v, want %+v", value, optionsTestValue{N: 99}) - } - - if !resolved.reuseFn(optionsTestValue{N: -1}) { - t.Fatal("default reuseFn returned false for value-typed input, want true") - } - - // The default drop policy is intentionally just "do nothing and do not - // panic"; there is no additional state to assert here. 
- resolved.dropFn(optionsTestValue{N: 7}) - }) - - t.Run("installs defaults for pointer type", func(t *testing.T) { - expected := &optionsTestObject{count: 5, payload: []byte("abc")} - resolved := Options[*optionsTestObject]{ - New: func() *optionsTestObject { - return expected - }, - }.resolve() - - if got := resolved.newFn(); got != expected { - t.Fatalf("resolved newFn() pointer = %p, want %p", got, expected) - } - - object := &optionsTestObject{count: 77, payload: []byte("payload")} - beforeCount := object.count - beforePayload := string(object.payload) - // The default reset policy must remain a true no-op for pointer-backed T: - // resolve() should install noopReset, not synthesize cleanup. - resolved.resetFn(object) - if object.count != beforeCount || string(object.payload) != beforePayload { - t.Fatalf( - "default resetFn mutated pointer-backed input: got count=%d payload=%q, want count=%d payload=%q", - object.count, - string(object.payload), - beforeCount, - beforePayload, - ) - } +func TestOptionsResolvePanicsWhenNewIsNil(t *testing.T) { + testutil.AssertPanicMessage( + t, + "Options.resolve() with nil New", + func() { + var opts Options[*optionsTestObject] + _ = opts.resolve() + }, + "pool: Options.New must not be nil", + ) +} - if !resolved.reuseFn(object) { - t.Fatal("default reuseFn returned false for pointer-typed input, want true") - } +func TestOptionsResolveDoesNotInvokeHooks(t *testing.T) { + calls := optionsHookCalls{} + opts := Options[*optionsTestObject]{ + New: func() *optionsTestObject { + calls.new++ + return &optionsTestObject{} + }, + Reset: func(*optionsTestObject) { + calls.reset++ + }, + Reuse: func(*optionsTestObject) bool { + calls.reuse++ + return true + }, + OnDrop: func(*optionsTestObject) { + calls.drop++ + }, + } + + _ = opts.resolve() + + if calls != (optionsHookCalls{}) { + t.Fatalf("resolve() invoked hooks eagerly: got %+v, want all counters to remain zero", calls) + } +} - resolved.dropFn(object) - }) +func 
TestOptionsResolveInstallsDefaultsForValueType(t *testing.T) { + resolved := Options[optionsTestValue]{ + New: func() optionsTestValue { + return optionsTestValue{N: 41} + }, + }.resolve() - t.Run("preserves custom hooks", func(t *testing.T) { - calls := optionsHookCalls{} - marker := &optionsTestObject{count: 10, payload: []byte("marker")} - - resolved := Options[*optionsTestObject]{ - New: func() *optionsTestObject { - calls.new++ - return marker - }, - Reset: func(v *optionsTestObject) { - calls.reset++ - v.count = 0 - v.payload = v.payload[:0] - v.resetSeen = true - }, - Reuse: func(v *optionsTestObject) bool { - calls.reuse++ - return v.count <= 64 - }, - OnDrop: func(v *optionsTestObject) { - calls.drop++ - v.dropSeen = true - }, - }.resolve() - - if got := resolved.newFn(); got != marker { - t.Fatalf("resolved newFn() pointer = %p, want %p", got, marker) - } - if calls.new != 1 { - t.Fatalf("custom newFn call count = %d, want 1", calls.new) - } + if got := resolved.newFn(); got != (optionsTestValue{N: 41}) { + t.Fatalf("resolved newFn() = %+v, want %+v", got, optionsTestValue{N: 41}) + } - reusable := &optionsTestObject{count: 32, payload: []byte("retain")} - // Accepted objects must flow through Reuse and then Reset, while OnDrop - // must remain untouched on the retained path. 
- if !resolved.reuseFn(reusable) { - t.Fatal("custom reuseFn rejected reusable object, want true") - } - resolved.resetFn(reusable) + value := optionsTestValue{N: 99} + resolved.resetFn(value) + if value != (optionsTestValue{N: 99}) { + t.Fatalf("default resetFn mutated value-typed input: got %+v, want %+v", value, optionsTestValue{N: 99}) + } - if calls.reuse != 1 { - t.Fatalf("custom reuseFn call count after accepted path = %d, want 1", calls.reuse) - } - if calls.reset != 1 { - t.Fatalf("custom resetFn call count after accepted path = %d, want 1", calls.reset) - } - if reusable.count != 0 { - t.Fatalf("custom resetFn left count = %d, want 0", reusable.count) - } - if len(reusable.payload) != 0 { - t.Fatalf("custom resetFn left payload length = %d, want 0", len(reusable.payload)) - } - if !reusable.resetSeen { - t.Fatal("custom resetFn did not mark resetSeen") - } - if reusable.dropSeen { - t.Fatal("drop callback ran on accepted object, want it to remain untouched") - } + if !resolved.reuseFn(optionsTestValue{N: -1}) { + t.Fatal("default reuseFn returned false for value-typed input, want true") + } - dropped := &optionsTestObject{count: 128, payload: []byte("drop")} - // Rejected objects follow the opposite path: Reuse returns false and the - // resolved drop callback remains callable with the original semantics. 
- if resolved.reuseFn(dropped) { - t.Fatal("custom reuseFn accepted dropped object, want false") - } - resolved.dropFn(dropped) + resolved.dropFn(optionsTestValue{N: 7}) +} - if calls.reuse != 2 { - t.Fatalf("custom reuseFn call count after dropped path = %d, want 2", calls.reuse) - } - if calls.drop != 1 { - t.Fatalf("custom dropFn call count = %d, want 1", calls.drop) - } - if !dropped.dropSeen { - t.Fatal("custom dropFn did not mark dropSeen") - } - }) +func TestOptionsResolveInstallsDefaultsForPointerType(t *testing.T) { + expected := &optionsTestObject{count: 5, payload: []byte("abc")} + resolved := Options[*optionsTestObject]{ + New: func() *optionsTestObject { + return expected + }, + }.resolve() + + if got := resolved.newFn(); got != expected { + t.Fatalf("resolved newFn() pointer = %p, want %p", got, expected) + } + + object := &optionsTestObject{count: 77, payload: []byte("payload")} + beforeCount := object.count + beforePayload := string(object.payload) + + resolved.resetFn(object) + + if object.count != beforeCount || string(object.payload) != beforePayload { + t.Fatalf( + "default resetFn mutated pointer-backed input: got count=%d payload=%q, want count=%d payload=%q", + object.count, + string(object.payload), + beforeCount, + beforePayload, + ) + } + if !resolved.reuseFn(object) { + t.Fatal("default reuseFn returned false for pointer-typed input, want true") + } - t.Run("allows mixing custom and default hooks", func(t *testing.T) { - resetCalls := 0 - reuseCalls := 0 - - resolved := Options[*optionsTestObject]{ - New: func() *optionsTestObject { - return &optionsTestObject{} - }, - Reset: func(v *optionsTestObject) { - resetCalls++ - v.count = 0 - }, - Reuse: func(v *optionsTestObject) bool { - reuseCalls++ - return v.count == 0 - }, - }.resolve() - - object := &optionsTestObject{count: 0} - if !resolved.reuseFn(object) { - t.Fatal("custom reuseFn rejected accepted object, want true") - } - resolved.resetFn(object) + resolved.dropFn(object) +} - if 
object.count != 0 { - t.Fatalf("custom resetFn left count = %d, want 0", object.count) - } - if resetCalls != 1 { - t.Fatalf("custom resetFn call count = %d, want 1", resetCalls) - } - if reuseCalls != 1 { - t.Fatalf("custom reuseFn call count = %d, want 1", reuseCalls) - } +func TestOptionsResolvePreservesCustomHooks(t *testing.T) { + calls := optionsHookCalls{} + marker := &optionsTestObject{count: 10, payload: []byte("marker")} + + resolved := Options[*optionsTestObject]{ + New: func() *optionsTestObject { + calls.new++ + return marker + }, + Reset: func(v *optionsTestObject) { + calls.reset++ + v.count = 0 + v.payload = v.payload[:0] + v.resetSeen = true + }, + Reuse: func(v *optionsTestObject) bool { + calls.reuse++ + return v.count <= 64 + }, + OnDrop: func(v *optionsTestObject) { + calls.drop++ + v.dropSeen = true + }, + }.resolve() + + if got := resolved.newFn(); got != marker { + t.Fatalf("resolved newFn() pointer = %p, want %p", got, marker) + } + if calls.new != 1 { + t.Fatalf("custom newFn call count = %d, want 1", calls.new) + } + + reusable := &optionsTestObject{count: 32, payload: []byte("retain")} + if !resolved.reuseFn(reusable) { + t.Fatal("custom reuseFn rejected reusable object, want true") + } + resolved.resetFn(reusable) + + if calls.reuse != 1 { + t.Fatalf("custom reuseFn call count after accepted path = %d, want 1", calls.reuse) + } + if calls.reset != 1 { + t.Fatalf("custom resetFn call count after accepted path = %d, want 1", calls.reset) + } + if reusable.count != 0 { + t.Fatalf("custom resetFn left count = %d, want 0", reusable.count) + } + if len(reusable.payload) != 0 { + t.Fatalf("custom resetFn left payload length = %d, want 0", len(reusable.payload)) + } + if !reusable.resetSeen { + t.Fatal("custom resetFn did not mark resetSeen") + } + if reusable.dropSeen { + t.Fatal("drop callback ran on accepted object, want it to remain untouched") + } + + dropped := &optionsTestObject{count: 128, payload: []byte("drop")} + if 
resolved.reuseFn(dropped) { + t.Fatal("custom reuseFn accepted dropped object, want false") + } + resolved.dropFn(dropped) + + if calls.reuse != 2 { + t.Fatalf("custom reuseFn call count after dropped path = %d, want 2", calls.reuse) + } + if calls.drop != 1 { + t.Fatalf("custom dropFn call count = %d, want 1", calls.drop) + } + if !dropped.dropSeen { + t.Fatal("custom dropFn did not mark dropSeen") + } +} - // OnDrop was left nil on purpose. The resolved drop hook should therefore - // remain the default no-op and be safely callable here. - resolved.dropFn(&optionsTestObject{count: 99}) - }) +func TestOptionsResolveAllowsMixingCustomAndDefaultHooks(t *testing.T) { + resetCalls := 0 + reuseCalls := 0 + + resolved := Options[*optionsTestObject]{ + New: func() *optionsTestObject { + return &optionsTestObject{} + }, + Reset: func(v *optionsTestObject) { + resetCalls++ + v.count = 0 + }, + Reuse: func(v *optionsTestObject) bool { + reuseCalls++ + return v.count == 0 + }, + }.resolve() + + object := &optionsTestObject{count: 0} + if !resolved.reuseFn(object) { + t.Fatal("custom reuseFn rejected accepted object, want true") + } + resolved.resetFn(object) + + if object.count != 0 { + t.Fatalf("custom resetFn left count = %d, want 0", object.count) + } + if resetCalls != 1 { + t.Fatalf("custom resetFn call count = %d, want 1", resetCalls) + } + if reuseCalls != 1 { + t.Fatalf("custom reuseFn call count = %d, want 1", reuseCalls) + } + + resolved.dropFn(&optionsTestObject{count: 99}) } func TestDefaultPolicies(t *testing.T) { diff --git a/pool.go b/pool.go index 26ac906..01072a9 100644 --- a/pool.go +++ b/pool.go @@ -26,7 +26,7 @@ import "arcoris.dev/pool/internal/backend" // 1. value construction on the slow path, via [Options.New]; // 2. lifecycle policy on return, via [Options.Reset], [Options.Reuse], and // [Options.OnDrop]; -// 3. low-level storage and retrieval, via an internal sync.Pool-backed +// 3. 
low-level storage and retrieval, via an internal [sync.Pool]-backed // backend. // // In practical terms, Pool exists so that callers can work with the following @@ -61,7 +61,7 @@ import "arcoris.dev/pool/internal/backend" // // # Design model // -// Pool deliberately follows the temporary-object model of sync.Pool rather than +// Pool deliberately follows the temporary-object model of [sync.Pool] rather than // the richer semantics of a full object-lifecycle manager. In particular: // // - objects returned to the pool may later disappear from the backend without @@ -87,7 +87,7 @@ import "arcoris.dev/pool/internal/backend" // # Concurrency // // Pool is safe for concurrent use by multiple goroutines. Backend storage is -// delegated to sync.Pool through an internal adapter, and lifecycle policy is +// delegated to [sync.Pool] through an internal adapter, and lifecycle policy is // immutable after construction. // // What is and is not concurrency-safe: @@ -183,7 +183,7 @@ type Pool[T any] struct { // // New is the only supported constructor for Pool. It validates and resolves the // public [Options] value, assembles the internal lifecycle controller, and -// creates the internal sync.Pool-backed backend. +// creates the internal [sync.Pool]-backed backend. // // # Construction steps // diff --git a/pool_baseline_benchmark_test.go b/pool_baseline_benchmark_test.go index 78a5d4c..0de5b44 100644 --- a/pool_baseline_benchmark_test.go +++ b/pool_baseline_benchmark_test.go @@ -29,7 +29,7 @@ import ( // answer only the baseline comparison questions defined by the benchmark // matrix: // - what plain allocation looks like for a temporary-object shape; -// - what direct sync.Pool usage looks like for the same shape; +// - what direct [sync.Pool] usage looks like for the same shape; // - what arcoris.dev/pool adds on top of that baseline. 
// // The suite mixes two serial baseline modes on purpose: @@ -84,6 +84,11 @@ var ( baselineValueSink baselineValue ) +func putBaselineValueIntoRawSyncPool(p *sync.Pool, v baselineValue) { + //nolint:staticcheck // This contrast benchmark intentionally measures by-value [sync.Pool] storage. + p.Put(v) +} + // newBaselinePointer constructs a fresh pointer-like benchmark value with a // stable reusable scratch slice. // @@ -151,7 +156,7 @@ func benchmarkBaselineAllocOnlyPointer(b *testing.B) { } // benchmarkBaselineControlledRawSyncPoolPointer implements the controlled steady-state -// direct sync.Pool pointer baseline used by both the canonical benchmark and +// direct [sync.Pool] pointer baseline used by both the canonical benchmark and // the compare suite. func benchmarkBaselineControlledRawSyncPoolPointer(b *testing.B) { testutil.WithControlledSteadyStatePoolRoundTrip(b, func() { @@ -237,7 +242,7 @@ func benchmarkBaselineAllocOnlyValue(b *testing.B) { } // benchmarkBaselineControlledRawSyncPoolValue implements the controlled steady-state -// direct sync.Pool value baseline. +// direct [sync.Pool] value baseline. 
func benchmarkBaselineControlledRawSyncPoolValue(b *testing.B) { testutil.WithControlledSteadyStatePoolRoundTrip(b, func() { var news uint64 @@ -252,7 +257,7 @@ func benchmarkBaselineControlledRawSyncPoolValue(b *testing.B) { testutil.PrimePoolValue(func() baselineValue { return p.Get().(baselineValue) }, func(v baselineValue) { - p.Put(v) + putBaselineValueIntoRawSyncPool(p, v) }) news = 0 @@ -263,7 +268,7 @@ func benchmarkBaselineControlledRawSyncPoolValue(b *testing.B) { v := p.Get().(baselineValue) v = exerciseBaselineValue(v, i) baselineValueSink = v - p.Put(v) + putBaselineValueIntoRawSyncPool(p, v) } b.StopTimer() @@ -313,7 +318,7 @@ func BenchmarkBaseline_AllocOnly_Pointer(b *testing.B) { } // BenchmarkBaseline_Controlled_RawSyncPool_Pointer measures the controlled -// steady-state direct sync.Pool reuse path for the pointer-like temporary +// steady-state direct [sync.Pool] reuse path for the pointer-like temporary // object shape. // // The harness pins execution to one P, disables GC for the timed body, and @@ -327,7 +332,7 @@ func BenchmarkBaseline_Controlled_RawSyncPool_Pointer(b *testing.B) { // BenchmarkBaseline_Controlled_ARCORISPool_Pointer measures the controlled // steady-state public runtime for the same pointer-like temporary-object shape. // -// Like the raw sync.Pool companion, this is an upper-bound hot path serial +// Like the raw [sync.Pool] companion, this is an upper-bound hot path serial // reuse benchmark. It is useful for localizing public runtime overhead relative // to the closest low-level baseline, but it is not a general package // performance claim on its own. @@ -342,7 +347,7 @@ func BenchmarkBaseline_AllocOnly_Value(b *testing.B) { } // BenchmarkBaseline_Controlled_RawSyncPool_Value measures the controlled -// steady-state direct sync.Pool reuse path for the canonical value-type +// steady-state direct [sync.Pool] reuse path for the canonical value-type // benchmark shape. 
func BenchmarkBaseline_Controlled_RawSyncPool_Value(b *testing.B) { benchmarkBaselineControlledRawSyncPoolValue(b) diff --git a/pool_parallel_benchmark_test.go b/pool_parallel_benchmark_test.go index 3388ee3..6be6c07 100644 --- a/pool_parallel_benchmark_test.go +++ b/pool_parallel_benchmark_test.go @@ -29,7 +29,7 @@ import ( // The suite exists to make concurrent behaviour visible separately from the // serial benchmarks. Serial steady-state results are useful, but they do not // answer how the runtime behaves under concurrent Get/Put traffic or how much -// extra cost comes from package orchestration relative to direct sync.Pool +// extra cost comes from package orchestration relative to direct [sync.Pool] // usage. // // Unlike the controlled serial hot path benchmarks elsewhere in the suite, @@ -49,7 +49,7 @@ type parallelAccepted struct { } // parallelCompare is the compact pointer-backed shape shared by the raw -// sync.Pool and public-runtime parallel comparison baselines. +// [sync.Pool] and public-runtime parallel comparison baselines. type parallelCompare struct { ID uint64 Data [16]byte @@ -142,7 +142,7 @@ func BenchmarkParallel_RealisticRejected(b *testing.B) { benchmarkRealisticParallelRejected(b) } -// benchmarkRealisticParallelRawSyncPool measures concurrent direct sync.Pool +// benchmarkRealisticParallelRawSyncPool measures concurrent direct [sync.Pool] // usage as the closest low-level external baseline for the package runtime. func benchmarkRealisticParallelRawSyncPool(b *testing.B) { var news atomic.Uint64 @@ -179,7 +179,7 @@ func benchmarkRealisticParallelRawSyncPool(b *testing.B) { testutil.ReportPerOpMetric(b, news.Load(), testutil.MetricNewsPerOp) } -// BenchmarkParallel_RealisticRawSyncPool measures concurrent direct sync.Pool +// BenchmarkParallel_RealisticRawSyncPool measures concurrent direct [sync.Pool] // usage as the closest low-level external baseline for the package runtime. 
func BenchmarkParallel_RealisticRawSyncPool(b *testing.B) { benchmarkRealisticParallelRawSyncPool(b) diff --git a/pool_paths_benchmark_test.go b/pool_paths_benchmark_test.go index 654b4db..8a5255e 100644 --- a/pool_paths_benchmark_test.go +++ b/pool_paths_benchmark_test.go @@ -26,7 +26,7 @@ import ( // arcoris.dev/pool. // // Its responsibility is intentionally narrow. These benchmarks do not compare -// the package against plain allocation or direct sync.Pool usage; that belongs +// the package against plain allocation or direct [sync.Pool] usage; that belongs // to pool_baseline_benchmark_test.go. They also do not explore broader type // shape sensitivity or parallel scaling; those concerns belong to the shapes // and parallel benchmark suites. diff --git a/pool_shapes_benchmark_test.go b/pool_shapes_benchmark_test.go index d0b6477..f43f4b4 100644 --- a/pool_shapes_benchmark_test.go +++ b/pool_shapes_benchmark_test.go @@ -26,7 +26,7 @@ import ( // // The purpose of this suite is to isolate how the shape of T changes the cost // profile of the public runtime. These benchmarks do not compare against plain -// allocation or direct sync.Pool usage. Those questions belong to the baseline +// allocation or direct [sync.Pool] usage. Those questions belong to the baseline // suite. They also do not focus on parallelism, which belongs to the parallel // suite. 
// From 69d2da3acd8c73d53e94038bef2f8a2f563af919 Mon Sep 17 00:00:00 2001 From: Anton Bochkov Date: Fri, 24 Apr 2026 10:34:51 +0300 Subject: [PATCH 3/6] ci(benchmark): avoid secondary smoke artifact failures --- .github/workflows/benchmark-smoke.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark-smoke.yml b/.github/workflows/benchmark-smoke.yml index 4551699..b65014d 100644 --- a/.github/workflows/benchmark-smoke.yml +++ b/.github/workflows/benchmark-smoke.yml @@ -73,6 +73,12 @@ jobs: python3 --version bash --version | head -n 1 + - name: "Prepare smoke artifact directory" + shell: "bash" + run: | + set -euo pipefail + mkdir -p .ci-smoke + - name: "Verify benchmark tooling entrypoints exist" shell: "bash" run: | @@ -116,7 +122,6 @@ jobs: run: | set -euo pipefail - mkdir -p .ci-smoke go test ./internal/backend \ -run '^$' \ -bench '^BenchmarkSyncPool_GetMiss$' \ @@ -193,9 +198,10 @@ jobs: } >> "$GITHUB_STEP_SUMMARY" - name: "Upload smoke artifacts" + if: ${{ always() && hashFiles('.ci-smoke/**') != '' }} uses: "actions/upload-artifact@v7" with: name: "benchmark-smoke-artifacts" path: ".ci-smoke" retention-days: 7 - if-no-files-found: "error" + if-no-files-found: "warn" From cd178e4768909ed9fc624988018c14af03ac6f88 Mon Sep 17 00:00:00 2001 From: Anton Bochkov Date: Fri, 24 Apr 2026 11:10:54 +0300 Subject: [PATCH 4/6] ci(github): simplify protected main workflow --- .github/dependabot.yml | 8 +- .github/workflows/attest.yml | 52 ++++--- .github/workflows/benchmark-smoke.yml | 4 +- .github/workflows/ci.yml | 4 +- .github/workflows/commit-lint.yml | 38 +++-- .github/workflows/dependency-review.yml | 2 + .github/workflows/docs-smoke.yml | 7 +- .github/workflows/govulncheck.yml | 7 +- .github/workflows/lint.yml | 6 +- .github/workflows/release.yml | 179 ++++++++++-------------- .github/workflows/scorecards.yml | 8 +- .github/workflows/security-codeql.yml | 4 +- commitlint.config.cjs | 71 ++++------ 
release.config.cjs | 21 ++- 14 files changed, 177 insertions(+), 234 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b6a379a..b426352 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -18,6 +18,7 @@ updates: # Go module dependencies. - package-ecosystem: "gomod" directory: "/" + target-branch: "main" schedule: interval: "weekly" day: "monday" @@ -32,7 +33,7 @@ updates: - "dependencies" - "go" commit-message: - prefix: "deps" + prefix: "chore" include: "scope" # Group routine module updates together so maintainers are not flooded with @@ -59,6 +60,7 @@ updates: # GitHub Actions dependencies used by repository automation. - package-ecosystem: "github-actions" directory: "/" + target-branch: "main" schedule: interval: "weekly" day: "monday" @@ -71,7 +73,7 @@ updates: - "dependencies" - "github-actions" commit-message: - prefix: "deps" + prefix: "chore" include: "scope" groups: @@ -85,4 +87,4 @@ updates: ignore: - dependency-name: "*" update-types: - - "version-update:semver-major" \ No newline at end of file + - "version-update:semver-major" diff --git a/.github/workflows/attest.yml b/.github/workflows/attest.yml index 9a58b2f..7e429aa 100644 --- a/.github/workflows/attest.yml +++ b/.github/workflows/attest.yml @@ -17,8 +17,13 @@ "on": push: tags: - - "v*" + - "v*.*.*" workflow_dispatch: + inputs: + release_tag: + description: "Existing stable tag to attest (for example: v1.2.3)" + required: true + type: "string" permissions: # Read repository contents to build the release bundle from the tagged source. @@ -30,7 +35,7 @@ permissions: artifact-metadata: "write" concurrency: - # Only one attestation workflow should run per tag at a time. + # Only one attestation workflow should run per stable tag at a time. group: "attest-${{ github.workflow }}-${{ github.ref }}" cancel-in-progress: true @@ -44,6 +49,8 @@ jobs: - name: "Checkout tagged source" uses: "actions/checkout@v6" with: + # Attest only stable tags. 
Manual dispatches must supply the exact tag. + ref: "${{ github.event_name == 'workflow_dispatch' && inputs.release_tag || github.ref_name }}" fetch-depth: 0 - name: "Resolve release metadata" @@ -52,12 +59,23 @@ jobs: run: | set -euo pipefail - if [[ "${GITHUB_REF_TYPE:-}" == "tag" ]]; then - tag="${GITHUB_REF_NAME}" + if [[ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ]]; then + tag="${{ inputs.release_tag }}" else - # workflow_dispatch fallback: - # use the current HEAD short SHA when no release tag triggered the run. - tag="manual-${GITHUB_SHA::12}" + tag="${GITHUB_REF_NAME}" + fi + + if [[ ! "${tag}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "stable attestations require a SemVer tag in the form vX.Y.Z; got ${tag}" >&2 + exit 1 + fi + + git fetch origin main --force + + release_sha="$(git rev-list -n 1 "${tag}")" + if ! git merge-base --is-ancestor "${release_sha}" "origin/main"; then + echo "stable attestation tag ${tag} does not point to a commit reachable from origin/main" >&2 + exit 1 fi artifact_dir="dist" @@ -67,6 +85,7 @@ jobs: mkdir -p "${artifact_dir}" echo "tag=${tag}" >> "$GITHUB_OUTPUT" + echo "release_sha=${release_sha}" >> "$GITHUB_OUTPUT" echo "artifact_dir=${artifact_dir}" >> "$GITHUB_OUTPUT" echo "artifact_name=${artifact_name}" >> "$GITHUB_OUTPUT" echo "checksum_name=${checksum_name}" >> "$GITHUB_OUTPUT" @@ -79,18 +98,13 @@ jobs: artifact_dir="${{ steps.meta.outputs.artifact_dir }}" artifact_name="${{ steps.meta.outputs.artifact_name }}" checksum_name="${{ steps.meta.outputs.checksum_name }}" + tag="${{ steps.meta.outputs.tag }}" - # Build a deterministic source archive from the checked-out tagged tree. - # - # We use `git archive` instead of a GitHub-generated source ZIP/TGZ so: - # - the repository controls the exact artifact that is attested; - # - the artifact can also be uploaded as a workflow artifact; - # - future release pipelines can reuse the same source-bundle contract. 
git archive \ --format=tar.gz \ --prefix="arcoris-pool/" \ -o "${artifact_dir}/${artifact_name}" \ - HEAD + "${tag}" sha256sum "${artifact_dir}/${artifact_name}" | tee "${artifact_dir}/${checksum_name}" @@ -107,9 +121,6 @@ jobs: - name: "Generate provenance attestation" uses: "actions/attest@v4" with: - # New GitHub implementations should use `actions/attest`. - # `actions/attest-build-provenance` is now a thin wrapper and its own - # README recommends `actions/attest` for new setups. subject-path: "${{ steps.meta.outputs.artifact_dir }}/${{ steps.meta.outputs.artifact_name }}" - name: "Publish workflow summary" @@ -118,11 +129,10 @@ jobs: { echo "# Attestation Summary" echo + echo "- Stable tag: \`${{ steps.meta.outputs.tag }}\`" + echo "- Source commit: \`${{ steps.meta.outputs.release_sha }}\`" echo "- Subject: \`${{ steps.meta.outputs.artifact_name }}\`" echo "- Checksum file: \`${{ steps.meta.outputs.checksum_name }}\`" - echo "- Trigger: \`${{ github.event_name }}\`" - echo "- Ref: \`${{ github.ref }}\`" + echo "- Policy: stable attestations must point to a commit reachable from \`main\`" echo "- Result: provenance attestation created" - echo - echo "Consumers can verify attestations with the GitHub CLI attestation commands." 
} >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/benchmark-smoke.yml b/.github/workflows/benchmark-smoke.yml index b65014d..9132b06 100644 --- a/.github/workflows/benchmark-smoke.yml +++ b/.github/workflows/benchmark-smoke.yml @@ -17,12 +17,10 @@ "on": push: branches: - - "main" - - "next" + - "**" pull_request: branches: - "main" - - "next" merge_group: workflow_dispatch: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 153fda9..e295d06 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,12 +17,10 @@ "on": push: branches: - - "main" - - "next" + - "**" pull_request: branches: - "main" - - "next" # If merge queues are enabled later, keep the same required checks available # for merge groups as well. merge_group: diff --git a/.github/workflows/commit-lint.yml b/.github/workflows/commit-lint.yml index 7b4e523..f4e4d7f 100644 --- a/.github/workflows/commit-lint.yml +++ b/.github/workflows/commit-lint.yml @@ -15,7 +15,7 @@ # Commitlint workflow for repository-local Conventional Commit enforcement. # # Keep this workflow intentionally narrow: -# - validate commit messages, not PR review state; +# - validate commit messages and PR titles, not PR review state; # - use the repository's `commitlint.config.cjs`; # - keep event-to-commit-range logic in a repository script, not inline bash. "name": "Commit Lint" @@ -25,28 +25,13 @@ pull_request: branches: - "main" - - "next" types: - "opened" + - "edited" - "synchronize" - "reopened" - - "edited" - "ready_for_review" - # If merge queues are enabled later, this trigger is required so the same - # status check can run for merge groups as well. - merge_group: - types: - - "checks_requested" - - # Also lint direct pushes to release branches. This is still useful even when - # protected branches are enabled, because it prevents silently accepting a bad - # commit message on any path that bypasses or predates the PR flow. 
- push: - branches: - - "main" - - "next" - workflow_dispatch: permissions: @@ -61,9 +46,8 @@ concurrency: jobs: commitlint: - # Keep the job name stable. If the repository later enables merge queues, - # GitHub can require this exact status check for both pull_request and - # merge_group events. + # Keep the job name stable so branch protection can require this exact + # check for pull requests into `main`. name: "commitlint" runs-on: "ubuntu-latest" timeout-minutes: 10 @@ -103,8 +87,9 @@ jobs: - name: "Install standalone commitlint toolchain" run: | - # Keep commitlint on the same repository policy as semantic-release: - # no committed Node manifest and no generated lockfile in CI. + # Keep commitlint on the same repository Node-tooling policy as other + # automation here: no committed Node manifest and no generated lockfile + # in CI. npm install --no-save --package-lock=false --no-audit --fund=false \ @commitlint/cli \ @commitlint/config-conventional @@ -115,6 +100,15 @@ jobs: - name: "Show commitlint version" run: npx commitlint --version + - name: "Lint pull request title" + if: ${{ github.event_name == 'pull_request' }} + env: + PR_TITLE: "${{ github.event.pull_request.title }}" + run: | + # Squash merges often use the PR title as the final commit header. + # Keep the title under the same Conventional Commit policy as commits. 
+ printf '%s\n' "${PR_TITLE}" | npx commitlint --config commitlint.config.cjs --verbose + - name: "Run repository commitlint orchestration" env: EVENT_NAME: "${{ github.event_name }}" diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 4a32510..357ba00 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -16,6 +16,8 @@ "on": pull_request: + branches: + - "main" types: - "opened" - "synchronize" diff --git a/.github/workflows/docs-smoke.yml b/.github/workflows/docs-smoke.yml index ac30a9f..ec1e98b 100644 --- a/.github/workflows/docs-smoke.yml +++ b/.github/workflows/docs-smoke.yml @@ -26,15 +26,14 @@ "on": # Run docs smoke on the same review surfaces that can merge repository docs - # changes into maintained branches. + # changes into the protected branch, and on ordinary topic-branch pushes so + # navigation regressions are visible before a pull request exists. push: branches: - - "main" - - "next" + - "**" pull_request: branches: - "main" - - "next" merge_group: workflow_dispatch: diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index ebb15cc..95f38d7 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -18,17 +18,14 @@ push: branches: - "main" - - "next" pull_request: branches: - "main" - - "next" - merge_group: workflow_dispatch: schedule: # Weekly scheduled scan so the repository is re-checked even when there are # no dependency-changing pull requests in flight. - - cron: "13 5 * * 2" + - cron: "0 3 * * 1" permissions: contents: "read" @@ -49,7 +46,7 @@ jobs: uses: "actions/checkout@v6" with: # Full history is not strictly required by govulncheck itself, but it - # keeps repository state consistent across push / PR / merge-group jobs + # keeps repository state consistent across push / PR jobs # and matches the rest of the security workflow layer. 
fetch-depth: 0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f377ee9..5247e58 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,12 +17,10 @@ "on": push: branches: - - "main" - - "next" + - "**" pull_request: branches: - "main" - - "next" # Support merge queues / merge groups if the repository enables them later. merge_group: workflow_dispatch: @@ -91,7 +89,7 @@ jobs: # change set. This makes review output more actionable and avoids # turning old repository debt into PR noise. # - # On push events to release branches, lint the whole repository state. + # On push events, lint the whole repository state. only-new-issues: ${{ github.event_name == 'pull_request' || github.event_name == 'merge_group' }} # Refresh caches periodically so stale linter state does not linger diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 34f9b0e..bea4f82 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,17 +16,21 @@ "on": push: - branches: - - "main" - - "next" + tags: + - "v*.*.*" workflow_dispatch: + inputs: + release_tag: + description: "Existing stable tag to publish (for example: v1.2.3)" + required: true + type: "string" concurrency: - # Only one release pipeline should run per branch at a time. + # Keep only one stable release publication active for the same ref at a time. # - # This avoids overlapping semantic-release executions on the same release - # branch, which can otherwise race on tags, changelog commits, or GitHub - # release publication. + # Tag-triggered runs naturally serialize by tag ref. Manual dispatches are + # still grouped by the selected workflow ref, which is good enough for the + # recovery-oriented manual path. 
group: "release-${{ github.workflow }}-${{ github.ref }}" cancel-in-progress: true @@ -34,22 +38,54 @@ jobs: verify: name: "verify" runs-on: "ubuntu-latest" + outputs: + release_tag: "${{ steps.meta.outputs.release_tag }}" + release_sha: "${{ steps.meta.outputs.release_sha }}" permissions: contents: "read" steps: - - name: "Checkout repository" + - name: "Checkout stable tag source" uses: "actions/checkout@v6" with: - # semantic-release itself needs full history later, but keeping fetch - # depth explicit here avoids accidental dependence on a shallow clone. + # Stable releases are published from an existing SemVer tag, not from + # branch pushes. For manual dispatches, the caller must provide the + # tag explicitly. + ref: "${{ github.event_name == 'workflow_dispatch' && inputs.release_tag || github.ref_name }}" fetch-depth: 0 + - name: "Resolve and validate stable release tag" + id: meta + shell: "bash" + run: | + set -euo pipefail + + if [[ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ]]; then + release_tag="${{ inputs.release_tag }}" + else + release_tag="${GITHUB_REF_NAME}" + fi + + if [[ ! "${release_tag}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "stable releases require a SemVer tag in the form vX.Y.Z; got ${release_tag}" >&2 + exit 1 + fi + + git fetch origin main --force + + release_sha="$(git rev-list -n 1 "${release_tag}")" + if ! git merge-base --is-ancestor "${release_sha}" "origin/main"; then + echo "stable release tag ${release_tag} does not point to a commit reachable from origin/main" >&2 + exit 1 + fi + + echo "release_tag=${release_tag}" >> "$GITHUB_OUTPUT" + echo "release_sha=${release_sha}" >> "$GITHUB_OUTPUT" + - name: "Setup Go" uses: "actions/setup-go@v6" with: - # Respect the Go version declared by the repository. go-version-file: "go.mod" cache: true @@ -71,108 +107,39 @@ jobs: needs: - "verify" - # Releasing happens only after the verification job passes on a push to a - # configured release branch (`main` or `next`). 
- # - # This does NOT replace branch protection: - # - protect `main` and `next` - # - require pull requests before merge - # - require approving reviews - # - require status checks - # - # semantic-release can only be made "PR-gated" through repository rules, - # not through release.config.cjs alone. permissions: - # Needed by @semantic-release/github to publish releases. contents: "write" - # Needed only if the GitHub plugin adds labels or comments to issues. - issues: "write" - # Needed only if the GitHub plugin adds labels or comments to pull requests. - pull-requests: "write" - - # Optional but strongly recommended: - # create a GitHub Environment named `release` and require reviewers there. - # - # If the environment is configured with required reviewers, this job will - # pause for explicit approval before using secrets and publishing a release. + environment: name: "release" steps: - - name: "Checkout repository" - uses: "actions/checkout@v6" - with: - # semantic-release requires the full git history and tags to calculate - # the next version correctly. - fetch-depth: 0 - - - name: "Setup Node.js" - uses: "actions/setup-node@v6" - with: - # This repository intentionally does not carry `package.json` or - # `package-lock.json`. - # - # Release tooling is installed ephemerally at workflow runtime instead - # of being modeled as a committed Node project inside this Go library. - # Disable package-manager cache discovery so `setup-node` does not look - # for absent npm manifests. - node-version: "24" - package-manager-cache: false - - - name: "Show Node and npm versions" - run: | - node --version - npm --version - - - name: "Install standalone semantic-release toolchain" - run: | - # Keep the workflow honest about repository policy: - # - no `package.json` - # - no `package-lock.json` - # - no half-lockfile / half-fallback logic - # - # `--package-lock=false` prevents npm from fabricating a lockfile in the - # checked-out tree during CI. 
- npm install --no-save --package-lock=false --no-audit --fund=false \ - semantic-release \ - @semantic-release/changelog \ - @semantic-release/commit-analyzer \ - @semantic-release/git \ - @semantic-release/github \ - @semantic-release/release-notes-generator \ - conventional-changelog-conventionalcommits - - - name: "Verify release configuration exists" - run: test -f release.config.cjs - - - name: "Run semantic-release" + - name: "Publish GitHub release from stable tag" env: - # Default path: - # - GITHUB_TOKEN is enough to publish GitHub releases. - # - # Important limitation: - # - if your branch protection forbids bot pushes, the @semantic-release/git - # plugin may fail when committing CHANGELOG.md back to the release - # branch using the default token. - # - # If you intentionally allow protected-branch release commits through a - # dedicated GitHub App or PAT, store that token as - # `SEMANTIC_RELEASE_TOKEN` and it will take precedence. - # - # If you do NOT want automation to push back to a protected branch, - # remove `@semantic-release/git` / changelog commit-back behavior from - # the semantic-release configuration instead of weakening branch rules - # casually. - # - # This workflow intentionally installs semantic-release tooling - # ephemerally and does not rely on a committed Node manifest. - DEFAULT_GH_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - SEMANTIC_RELEASE_TOKEN: "${{ secrets.SEMANTIC_RELEASE_TOKEN }}" + GH_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + RELEASE_TAG: "${{ needs.verify.outputs.release_tag }}" + shell: "bash" run: | - if [ -n "${SEMANTIC_RELEASE_TOKEN}" ]; then - export GH_TOKEN="${SEMANTIC_RELEASE_TOKEN}" - else - export GH_TOKEN="${DEFAULT_GH_TOKEN}" + set -euo pipefail + + if gh release view "${RELEASE_TAG}" >/dev/null 2>&1; then + echo "GitHub release for ${RELEASE_TAG} already exists; leaving it unchanged." 
+ exit 0 fi - npx semantic-release + gh release create "${RELEASE_TAG}" \ + --verify-tag \ + --generate-notes \ + --title "${RELEASE_TAG}" + + - name: "Publish workflow summary" + shell: "bash" + run: | + { + echo "# Release Summary" + echo + echo "- Stable tag: \`${{ needs.verify.outputs.release_tag }}\`" + echo "- Source commit: \`${{ needs.verify.outputs.release_sha }}\`" + echo "- Policy: stable releases must point to a commit reachable from \`main\`" + echo "- Result: GitHub release published or already present" + } >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 5c9a30f..e3d5293 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -18,13 +18,15 @@ push: branches: - "main" + workflow_dispatch: + branch_protection_rule: schedule: - # Weekly scheduled scan on the default branch. + # Weekly scheduled scan on the stable branch. # # Keep a scheduled run even if repository activity is low, so the published # Scorecard result is refreshed against newer query logic and newer # repository state over time. - - cron: "31 4 * * 1" + - cron: "0 5 * * 1" # Important workflow restrictions when `publish_results: true` is enabled: # @@ -36,7 +38,7 @@ permissions: "read-all" concurrency: - # Keep only one Scorecards run active for the default branch. + # Keep only one Scorecards run active for the stable branch. # # This avoids overlapping result publication and reduces unnecessary pressure # on the Scorecard API / code scanning upload path. diff --git a/.github/workflows/security-codeql.yml b/.github/workflows/security-codeql.yml index 9b8e048..48b2461 100644 --- a/.github/workflows/security-codeql.yml +++ b/.github/workflows/security-codeql.yml @@ -18,18 +18,16 @@ push: branches: - "main" - - "next" pull_request: branches: - "main" - - "next" schedule: # Weekly scheduled scan. 
# # Keep at least one scheduled run even when normal PR traffic is low, so # CodeQL still revisits the repository against newer query packs and newer # vulnerability knowledge over time. - - cron: "27 3 * * 1" + - cron: "0 4 * * 1" workflow_dispatch: permissions: diff --git a/commitlint.config.cjs b/commitlint.config.cjs index 3eb8024..8c2a1a9 100644 --- a/commitlint.config.cjs +++ b/commitlint.config.cjs @@ -18,11 +18,12 @@ * commitlint configuration for `arcoris.dev/pool`. * * Why this file exists: - * - semantic-release derives release type from commit messages; - * - malformed commit messages can silently prevent the intended release from - * being cut; - * - a repository-level commit convention keeps changelog sections, release - * notes, and review expectations consistent. + * - malformed commit headers make pull requests, squash merges, and repository + * history harder to review; + * - the repository expects Conventional Commit style for both commits and PR + * titles; + * - a repository-level commit convention keeps release notes, review + * expectations, and automation behavior consistent. 
* * Important: * - commitlint does NOT publish releases; @@ -33,22 +34,25 @@ * This repository uses Conventional Commit style with a small set of explicit * repository-specific rules: * - * Release-driving types: + * Allowed change types: * - feat * - fix + * - refactor * - perf + * - docs + * - test + * - build + * - ci + * - chore * - revert - * - security - * - deps * * Non-release types still allowed in history: * - build * - ci * - chore * - docs - * - refactor - * - style * - test + * - refactor * * Breaking changes: * - a breaking change should use the `!` marker in the header and a @@ -69,26 +73,24 @@ * - perf(bench): reduce chart generation overhead * - docs(readme): clarify pointer-like guidance * - test(backend): cover syncpool miss path - * - security(workflow): tighten release token permissions - * - deps(actions): update github/codeql-action to v4 + * - ci(github): tighten release tag gating + * - chore(deps): update github/codeql-action to v4 * * Bad examples: * - fixed stuff * - update files * - docs: README * - feat: Added New Feature - * - chore(release): 1.2.3 + * - deps(actions): update workflow action * - * The last example is intentionally disallowed for humans. Release commits are - * generated by semantic-release automation and should not be authored manually. + * The last example is intentionally disallowed for humans. Release-shaped + * headers are reserved for repository release automation and should not be + * authored manually. */ module.exports = { /** * Start from the official conventional baseline. - * - * This applies the conventional-changelog-conventionalcommits parser preset, - * which aligns well with semantic-release. 
*/ extends: ['@commitlint/config-conventional'], @@ -131,15 +133,12 @@ module.exports = { 'build', 'ci', 'chore', - 'deps', 'docs', 'feat', 'fix', 'perf', 'refactor', 'revert', - 'security', - 'style', 'test', ], ], @@ -205,8 +204,7 @@ module.exports = { * - `!` in the header * - `BREAKING CHANGE:` footer * - * This avoids ambiguous breaking markers and aligns commitlint with the - * repository's semantic-release expectations. + * This avoids ambiguous breaking markers and keeps history explicit. */ 'breaking-change-exclamation-mark': [2, 'always'], @@ -214,9 +212,9 @@ module.exports = { * Repository-specific policy: * do not allow manual human-authored release commits. * - * semantic-release may still generate `chore(release): x.y.z [skip ci]` - * automatically in CI. This rule exists to discourage contributors from - * using release-shaped commits manually in normal development. + * Release automation may still generate `chore(release): x.y.z [skip ci]` + * or a similar reserved shape. This rule exists to discourage contributors + * from using release-shaped commits manually in normal development. * * commitlint cannot express "disallow only some exact commit headers" as a * built-in rule, so we enforce it with a simple plugin below. @@ -230,8 +228,8 @@ module.exports = { /** * Reject manual release-commit headers that imitate semantic-release. * - * We allow automation to create release commits during the release - * workflow, but contributors should not use that shape in ordinary + * We allow release automation to create reserved release-shaped headers + * when needed, but contributors should not use that shape in ordinary * development history. 
*/ 'arcoris-no-manual-release-commit': ({header}) => { @@ -240,7 +238,7 @@ module.exports = { return [ !forbidden.test(value), - 'do not author manual release commits; release commits are created by semantic-release automation', + 'do not author manual release commits; release-shaped headers are reserved for release automation', ]; }, }, @@ -299,16 +297,6 @@ module.exports = { title: 'Reverts', emoji: '', }, - security: { - description: 'A security-relevant fix or hardening change', - title: 'Security', - emoji: '', - }, - deps: { - description: 'A dependency update with repository impact', - title: 'Dependencies', - emoji: '', - }, docs: { description: 'Documentation only', title: 'Documentation', @@ -334,11 +322,6 @@ module.exports = { title: 'CI', emoji: '', }, - style: { - description: 'Formatting or stylistic cleanup', - title: 'Style', - emoji: '', - }, chore: { description: 'Repository maintenance that is not otherwise classified', title: 'Chores', diff --git a/release.config.cjs b/release.config.cjs index ec50f89..18f6215 100644 --- a/release.config.cjs +++ b/release.config.cjs @@ -22,8 +22,10 @@ * such as `@semantic-release/npm` and package.json version rewriting. * * Release model: - * - `main` publishes stable releases to the default channel - * - `next` publishes prereleases to the `next` channel + * - `main` is the stable release source + * - stable tags are created from `main` + * - stable publication is intentionally separated from ordinary development + * branch pushes * * Versioning model: * - `feat` -> minor @@ -49,7 +51,7 @@ * - protected branches / rulesets * - required status checks * - required reviews - * - release workflow triggers only on pushes to protected release branches + * - tag-gated or otherwise protected release workflow triggers * * semantic-release itself explicitly warns that any user who can push to a * configured release branch can publish a release. Protect those branches. 
@@ -70,23 +72,16 @@ module.exports = { * `main` * Stable releases published to the default distribution channel. * - * `next` - * Prereleases published to the `next` channel with prerelease identifiers. - * * This keeps the repository release flow simple: - * - stable work lands on `main` - * - preview / pre-release work can flow through `next` + * - normal development lands on short-lived topic branches + * - normal changes merge into `main` + * - stable release tags are cut from `main` */ branches: [ { name: 'main', channel: 'latest', }, - { - name: 'next', - channel: 'next', - prerelease: 'next', - }, ], /** From ed11534d0b57a739b15fa73faa4d8f88f8ec03d4 Mon Sep 17 00:00:00 2001 From: Anton Bochkov Date: Fri, 24 Apr 2026 11:11:11 +0300 Subject: [PATCH 5/6] docs(contributing): document protected main workflow --- .github/PULL_REQUEST_TEMPLATE.md | 216 ++--------- CONTRIBUTING.md | 615 +++++++++++++++---------------- 2 files changed, 332 insertions(+), 499 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 2949295..c0c4409 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -14,204 +14,62 @@ See the License for the specific language governing permissions and limitations under the License. --> -# Before you submit - -Use this template to explain the change clearly to maintainers and reviewers. - -Guidelines: -- Replace the guidance text with actual content. -- If a section does not apply, write `None`. -- Be concrete: include commands, file paths, benchmark artifacts, and exact behavior changes. -- If the PR changes runtime semantics, lifecycle ordering, ownership expectations, benchmark methodology, or documentation, describe that explicitly. -- If the PR makes a performance claim, attach the relevant benchmark evidence. - # Summary -What changed in this PR? - -Keep it short and concrete. -Preferred size: 3-7 bullets or 1-3 short paragraphs. 
- -Good examples: -- Clarified `Put` callback ordering in package docs and lifecycle tests. -- Added shape-specific benchmark coverage for large value types. -- Refined snapshot chart generation to group by benchmark family and metric. - -Avoid: -- Improved the package. -- Fixed various things. - -- -- -- - -## Linked work - -What should reviewers look at together with this PR? - -- Issue(s): -- Design / proposal: -- Report / benchmark reference: -- Auto-close directive (`Closes #123`, `Fixes #123`, etc.): +- What changed? +- Why is this change needed? -## Change classification - -Select all items that apply. +## Type of change - [ ] Bug fix - [ ] Feature -- [ ] Refactoring with no intended behavior change -- [ ] Performance or allocation improvement -- [ ] Benchmark / chart / report tooling change +- [ ] Refactor +- [ ] Performance improvement - [ ] Documentation change -- [ ] Build / CI / release tooling change -- [ ] Security / ownership / isolation clarification -- [ ] Breaking change -- [ ] Mechanical change only - -## Affected areas - -Select all repository areas materially affected by this PR. - -- [ ] Public runtime (`Pool[T]`, `Get`, `Put`) -- [ ] Lifecycle policy (`Options[T]`, `Reset`, `Reuse`, `OnDrop`) -- [ ] Backend (`internal/backend`) -- [ ] Ownership / concurrency contract -- [ ] Public package docs / Go doc -- [ ] Unit tests -- [ ] Benchmark source files -- [ ] Benchmark scripts (`bench/scripts`) -- [ ] Charts / reports / performance docs -- [ ] Root README or repository docs -- [ ] CI / release / repository automation - -## Why this change - -What problem existed before this PR? -Why is this change needed now? - -- Problem statement: -- Why now: -- Who benefits (caller, maintainer, contributor, benchmark/report workflow, etc.): - -## Reviewer guidance - -Tell reviewers where to focus. - -Examples: -- Review focus: `Put` ordering and ownership wording. -- Key invariants: admission happens before reset; rejected values are not stored. 
-- Files worth focused review: `lifecycle.go`, `lifecycle_test.go`, `docs/lifecycle.md`. +- [ ] Test change +- [ ] Build / toolchain change +- [ ] CI / automation change +- [ ] Chore / maintenance +- [ ] Revert -- Review focus: -- Key invariants or contracts: -- Files / flows worth focused review: -- Explicitly out of scope: +## Target branch -## Behavioral and contract impact - -Describe what changes in package behavior, documentation, or repository workflow. - -If a section does not apply, write `None`. - -- Public API impact: -- Lifecycle semantic impact: -- Ownership or concurrency impact: -- Backend or storage impact: -- Benchmark or report workflow impact: -- Documentation or example impact: -- Failure mode / misuse sensitivity: +- Target branch: `main` +- [ ] This PR targets `main` +- [ ] I did not push directly to the protected branch ## Validation -Prefer copy-pasteable commands and precise evidence. - -### Validation steps - -1. -2. -3. - -### Validation evidence - -- Commands / suites: -- Environment: -- Evidence summary: +- Local commands run: + - `go test ./...` + - `go test -race ./...` + - `go vet ./...` +- Additional checks run: - Remaining validation gaps: -### Tests executed - -Select what actually ran for this PR. - -- [ ] Unit tests -- [ ] Race detector -- [ ] Backend tests -- [ ] Benchmark compilation / smoke validation -- [ ] Benchmarks / benchmark scripts -- [ ] Chart generation validation -- [ ] Documentation validation -- [ ] Lint / static analysis -- [ ] Manual validation -- [ ] Not run, explained above - -## Performance evidence - -Complete this section if the PR makes any performance, allocation, or chart/report claim. -If not applicable, write `None`. 
+## Documentation impact -- Relevant benchmark family: -- Raw artifact(s): -- Compare artifact(s): -- Chart or report reference: -- Claim being made: -- Limits of that evidence: +- [ ] No documentation changes needed +- [ ] Documentation updated in this PR +- Notes: -## Compatibility and release impact +## Release impact -Be explicit even when there is no impact. +- [ ] No release impact +- [ ] Patch release impact +- [ ] Minor release impact +- [ ] Major / breaking change +- Notes: -- Breaking change: [ ] No [ ] Yes -- Migration required: [ ] No [ ] Yes -- Public API changed: [ ] No [ ] Yes -- Lifecycle / ownership semantics changed: [ ] No [ ] Yes -- Benchmark methodology or chart/report expectations changed: [ ] No [ ] Yes -- Default behavior changed: [ ] No [ ] Yes - -### Migration or upgrade notes - -What should maintainers or users do when adopting this change? - -- - -## Security and data-safety considerations - -For this package, security-relevant changes may include: -- object reuse causing stale data retention; -- ownership confusion after `Put`; -- concurrency misuse surfaces; -- documentation that could cause unsafe caller assumptions. - -If not applicable, write `None`. - -- Security / ownership / data-retention impact: -- New trust assumptions or misuse risks: -- Third-party material copied or adapted: -- License / attribution follow-up: - -## Known limitations and follow-up +## Linked work -- Known limitations: -- Follow-up work: +- Related issue(s): +- Related proposal / design note: ## Author checklist -Select all items that are true for this PR. - -- [ ] I reviewed the diff myself before requesting review. -- [ ] I removed accidental secrets, tokens, personal data, debug-only artifacts, and temporary benchmark outputs. -- [ ] I added or updated tests where needed, or explained why they were not run. -- [ ] I updated documentation, examples, or comments where needed. 
-- [ ] I described lifecycle, ownership, compatibility, and behavioral impact where relevant. -- [ ] I attached benchmark/report evidence for any performance claim. -- [ ] I reviewed benchmark, chart, or report implications where relevant. -- [ ] CI is green, or failing / non-required jobs are explained. +- [ ] PR title follows `type(scope): summary` +- [ ] This change is based on a short-lived topic branch created from `main` +- [ ] Tests and checks run locally are listed above +- [ ] Docs, comments, or examples were updated where needed +- [ ] Release impact is described above diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 654d92f..9c5e319 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,444 +2,419 @@ # Contributing to `arcoris.dev/pool` -**Contributor guide for a small, policy-driven Go runtime with benchmark-grade evidence and repository-grade documentation.** +**Contributor workflow for a small Go library with a protected `main` branch and short-lived topic branches.** -[![Start Here](https://img.shields.io/badge/Start-Docs%20Index-0F766E?style=flat)](docs/index.md) +[![Docs Index](https://img.shields.io/badge/Docs-Index-0F766E?style=flat)](docs/index.md) [![Package Contract](https://img.shields.io/badge/Contract-doc.go-1D4ED8?style=flat)](doc.go) +[![Lifecycle](https://img.shields.io/badge/Guide-Lifecycle-B45309?style=flat)](docs/lifecycle.md) [![PR Template](https://img.shields.io/badge/Review-PR%20Template-0F172A?style=flat)](.github/PULL_REQUEST_TEMPLATE.md) -[![Performance](https://img.shields.io/badge/Evidence-Benchmark%20Workflow-B45309?style=flat)](docs/performance/README.md) -[Start Here](#start-here) · [Contribution Routes](#contribution-routes) · [Repository Ground Rules](#repository-ground-rules) · [Validation](#validation-by-change-type) · [Performance Evidence](#performance-and-benchmark-evidence) · [Pull Requests](#pull-requests) - -Small scope · Explicit lifecycle semantics · Human-readable docs · Evidence-backed performance work 
- -**Read by change type:** [Runtime](docs/lifecycle.md) · [Scope](docs/non-goals.md) · [Architecture](docs/architecture.md) · [Benchmark workflow](docs/performance/methodology.md) · [PR expectations](.github/PULL_REQUEST_TEMPLATE.md) +[Overview](#overview) · [Branch Model](#branch-model) · [Branch Naming Convention](#branch-naming-convention) · [Commit Message Convention](#commit-message-convention) · [Pull Request Flow](#pull-request-flow) · [Local Validation](#local-validation) · [GitHub Actions Policy](#github-actions-policy) -`arcoris.dev/pool` is intentionally small. The package exposes a narrow public -runtime, keeps lifecycle policy explicit, treats benchmark artifacts as real -engineering evidence, and expects documentation to stay aligned with code. - -That means a strong contribution here is usually focused, well-scoped, and -careful about contracts. A weak contribution usually widens the package into a -larger framework, hides lifecycle behavior behind abstraction, or makes broad -claims without repository evidence. - -## Start here - -| If you want to... 
| Read first | Then continue with | -| --- | --- | --- | -| understand the package before changing code | [README](README.md) | [Package contract](doc.go), [Lifecycle guide](docs/lifecycle.md) | -| change runtime or ownership behavior | [Lifecycle guide](docs/lifecycle.md) | [Non-goals guide](docs/non-goals.md), [Architecture guide](docs/architecture.md) | -| propose a scope or contract change | [Non-goals guide](docs/non-goals.md) | [Package contract](doc.go), [design proposal form](.github/ISSUE_TEMPLATE/03-design-proposal.yml) | -| work on benchmarks, charts, or reports | [Performance overview](docs/performance/README.md) | [Benchmark methodology](docs/performance/methodology.md), [Benchmark matrix](docs/performance/benchmark-matrix.md), [Interpretation guide](docs/performance/interpretation-guide.md) | -| improve docs or navigation | [Docs index](docs/index.md) | [README](README.md), this guide | -| prepare a pull request | [PR template](.github/PULL_REQUEST_TEMPLATE.md) | [Validation by change type](#validation-by-change-type) | - -## Contribution routes - -Choose the smallest route that matches the work. 
- -| Route | Use it for | Reference | -| --- | --- | --- | -| Bug report | reproducible defects in runtime behavior, ownership, docs, or tooling | [01-bug-report.yml](.github/ISSUE_TEMPLATE/01-bug-report.yml) | -| Feature request | scoped improvements that fit the current product boundaries | [02-feature-request.yml](.github/ISSUE_TEMPLATE/02-feature-request.yml) | -| Design proposal | public contract changes, lifecycle changes, benchmark methodology changes, or larger repository structure changes | [03-design-proposal.yml](.github/ISSUE_TEMPLATE/03-design-proposal.yml) | -| Documentation issue | wrong, stale, confusing, or missing docs | [04-docs-issue.yml](.github/ISSUE_TEMPLATE/04-docs-issue.yml) | -| Contract clarification | narrow maintainer questions about semantics, compatibility, or benchmark interpretation | [05-contract-question.yml](.github/ISSUE_TEMPLATE/05-contract-question.yml) | - -Open a design proposal before coding if the change would: +`arcoris.dev/pool` is intentionally small. The repository centers on a narrow +typed pool runtime, explicit lifecycle callbacks, internal `sync.Pool`-backed +storage, documentation that acts as maintained surface area, and benchmark +tooling that is treated as engineering evidence rather than decoration. -- alter `Get` or `Put` semantics; -- change callback ordering or ownership rules; -- introduce new coordination concepts such as leases, borrow tracking, or inventory semantics; -- redefine the benchmark evidence model or stable suite vocabulary; -- materially change the repository documentation architecture. +That shape does not need a permanent integration branch. Normal work should +stay on short-lived topic branches, merge through pull requests, and release +from protected `main`. -## Repository ground rules +## Overview -### 1. Keep the public runtime narrow +- `main` is the only long-lived branch. +- `main` is the default branch and the protected branch. +- All normal pull requests target `main`. 
+- Direct pushes to `main` are forbidden by repository policy. +- Stable releases come from SemVer tags created from commits reachable from + `main`. +- Normal flow is: -The package is intentionally centered on: +```text +topic branch -> pull request into main -> merge -> optional stable tag from main +``` -- `Pool[T]`; -- explicit `Get` / `Put` ownership transfer; -- callback-based lifecycle policy in `Options[T]`; -- an internal backend that is not part of the public contract. +Read these first when the change touches package behavior: -Contributions should preserve that narrowness unless a design-level change is -explicitly proposed and justified. +- [README](README.md) +- [Package contract](doc.go) +- [Lifecycle guide](docs/lifecycle.md) +- [Architecture guide](docs/architecture.md) +- [Non-goals guide](docs/non-goals.md) -### 2. Preserve lifecycle behavior explicitly +Read these first when the change touches performance workflow: -The lifecycle contract is not an implementation detail. Contributions must -preserve or clearly document any change to: +- [Performance overview](docs/performance/README.md) +- [Benchmark methodology](docs/performance/methodology.md) +- [Benchmark matrix](docs/performance/benchmark-matrix.md) +- [Interpretation guide](docs/performance/interpretation-guide.md) -- acquisition behavior; -- return-path ordering; -- reset and reuse interaction; -- drop observation semantics; -- ownership transfer at `Get` and `Put`. +## Branch model -The canonical order today is: +### `main` -1. evaluate `Reuse`; -2. call `OnDrop` and stop when reuse is denied; -3. call `Reset` only for accepted values; -4. store only already-clean retained values. +- The only long-lived branch. +- The default branch. +- The protected branch. +- The target for all normal pull requests. +- The source of stable SemVer release tags. -If your change affects this area, [docs/lifecycle.md](docs/lifecycle.md) and -[doc.go](doc.go) are part of the change. 
+Direct pushes to `main` are not allowed by policy, even if GitHub settings +temporarily permit them. Repository rulesets should enforce the same policy. -### 3. Respect the product boundaries +### Topic branches -`arcoris.dev/pool` is a typed temporary-object reuse runtime, not a general -resource manager and not a broad pooling framework. Changes are usually out of -scope when they add: +- Created from `main`. +- Used for all normal work. +- Deleted after merge. -- stable inventory or cache guarantees; -- lease tokens or runtime borrow tracking as a core feature; -- queue, scheduler, semaphore, or coordination semantics; -- mandatory package-owned interfaces on `T`; -- framework-style abstraction layers that do not strengthen the current model. +### No permanent `next` -When in doubt, use [docs/non-goals.md](docs/non-goals.md) as the boundary -document. +This repository does not use a permanent `next` branch. For a small library, a +separate integration branch adds process overhead without improving the normal +review and release flow. -### 4. Treat benchmark artifacts as evidence +If a remote `next` branch still exists from an older workflow model, treat it +as obsolete. Maintainers may delete it manually after confirming that it +contains no unique changes. -Performance work in this repository is first-class engineering work, not -presentation-only work. Strong performance claims should be backed by: +If release stabilization is ever needed later, maintainers may create a +temporary branch such as `release/v0.2.0`. That is an exception, not the normal +workflow. -- repeated raw benchmark artifacts; -- matching environment capture; -- comparison output when revisions are compared; -- charts only when they help explain evidence; -- a report when the claim is substantial enough to deserve one. +## Creating a working branch -Local exploratory numbers are useful during development, but they are not -repository-grade evidence by themselves. 
+Create normal work branches from `main`: -### 5. Keep documentation synchronized +```bash +git fetch origin +git switch main +git pull --ff-only origin main +git switch -c ci/simplify-branch-policy +git push -u origin ci/simplify-branch-policy +``` -This repository treats documentation as maintained surface area. If a change -affects semantics, workflow, or navigation, update the affected docs in the -same change or call out the follow-up explicitly in the PR. +Then open a pull request into `main`. -At minimum, keep these entry points consistent when navigation changes: +Do not create normal work branches from an obsolete `next` branch. Do not +create them from an old release tag. Use current `main` unless maintainers +explicitly approved a different temporary base for exceptional release +stabilization work. -- [README](README.md); -- [Docs index](docs/index.md); -- [Contributing guide](CONTRIBUTING.md); -- [Security policy](SECURITY.md); -- [Code of Conduct](CODE_OF_CONDUCT.md); -- the relevant document inside [docs/](docs/). +## Branch naming convention -## Repository map +Use short, lower-case, hyphen-separated branch names. 
-
-| Area | Role |
-| --- | --- |
-| [README](README.md) | public landing page and first-stop package overview |
-| [Package contract (`doc.go`)](doc.go) | Go-facing package contract and runtime model |
-| [Contributing guide](CONTRIBUTING.md) | contributor workflow, validation, and repository expectations |
-| [Security policy](SECURITY.md) | vulnerability reporting path, supported-version policy, and repo-specific security scope |
-| [Code of Conduct](CODE_OF_CONDUCT.md) | repository collaboration standards, reporting expectations, and moderation baseline |
-| [Third-Party Notices](THIRD_PARTY_NOTICES.md) | attribution record for adapted upstream material and pinned third-party tooling references |
-| [Architecture guide](docs/architecture.md) | structure, layering, and dependency boundaries |
-| [Lifecycle guide](docs/lifecycle.md) | normative lifecycle and ownership semantics |
-| [Non-goals guide](docs/non-goals.md) | scope boundaries and proposal limits |
-| [Performance overview](docs/performance/README.md) | benchmark/report entry point |
-| [Benchmark scripts](bench/scripts/) | repeatable benchmark, compare, profile, and chart tooling |
-| [Internal backend](internal/backend/) | storage implementation details behind the public runtime |
-| [Test utilities](internal/testutil/) | shared helpers for tests and benchmarks |
-| [Issue forms](.github/ISSUE_TEMPLATE/) | structured issue intake |
-| [PR template](.github/PULL_REQUEST_TEMPLATE.md) | required review and validation shape for pull requests |
+Preferred format:
 
-## Local setup
+```text
+<prefix>/<short-description>
+```
 
-### Toolchain baseline
+Allowed prefixes:
 
-The module baseline is defined in [go.mod](go.mod):
+- `feat/` for new API-level or user-visible functionality
+- `fix/` for bug fixes
+- `refactor/` for internal restructuring without behavior changes
+- `perf/` for performance improvements
+- `docs/` for documentation-only changes
+- `test/` for tests and test infrastructure
+- `build/` for build system, module, or 
toolchain changes +- `ci/` for GitHub Actions and automation changes +- `chore/` for maintenance that does not fit another category +- `revert/` for reverting previous changes -- minimum Go version: `1.25.0`; -- preferred toolchain: `go1.25.9`. +Examples: -Stay on one Go version when collecting comparison-quality benchmark evidence. +- `feat/typed-pool-options` +- `fix/pool-put-nil-value` +- `refactor/backend-lifecycle` +- `perf/syncpool-allocation-path` +- `docs/performance-methodology` +- `test/pool-concurrency-cases` +- `build/go-toolchain-policy` +- `ci/simplify-branch-policy` +- `chore/dependabot-main-target` +- `revert/remove-invalid-release-change` -### Python tooling for charts +Avoid vague names: -If you touch [plot_benchmarks.py](bench/scripts/plot_benchmarks.py) or -generate charts locally, use the Python dependencies listed in -[requirements.txt](requirements.txt). +- `fix` +- `update` +- `changes` +- `work` +- `wip` +- `solve` -### Local validation baseline +Do not use `solve/` as a branch prefix. It is too vague and does not describe +the change type. -Treat local validation as required even when CI is unavailable, incomplete, or -still being formalized. +## Commit message convention -For code changes, the practical baseline is: +Use Conventional Commits: -```bash -go test ./... -go test -race ./... -go vet ./... +```text +type(scope): summary ``` -If lint configuration is part of the repository state you are working against, -run it locally when relevant. When [.golangci.yml](.golangci.yml) is present, -`golangci-lint run` is the expected local lint entrypoint. - -## Validation by change type - -Use the minimum validation that matches the work. More is welcome when the -change is risky. 
+Allowed types: -| Change type | Minimum validation | Also update when needed | -| --- | --- | --- | -| Runtime, lifecycle, or backend change | `go test ./...`, `go test -race ./...`, `go vet ./...` | [doc.go](doc.go), [README](README.md), [docs/lifecycle.md](docs/lifecycle.md), [docs/architecture.md](docs/architecture.md) | -| Test-only change | run the affected test package plus `go test ./...` when practical | comments or helper docs if test terminology changed | -| Documentation-only change | verify links, anchors, file names, benchmark names, and embedded chart paths | any adjacent nav docs that should still point to the changed file | -| Benchmark source change | ensure the benchmark compiles and the affected case or suite runs | [docs/performance/benchmark-matrix.md](docs/performance/benchmark-matrix.md), possibly [docs/performance/interpretation-guide.md](docs/performance/interpretation-guide.md) | -| Benchmark script change | validate the changed script path, arguments, output naming, and artifact paths | [docs/performance/methodology.md](docs/performance/methodology.md) | -| Chart tooling change | validate the relevant chart mode and rendered output naming | [docs/performance/methodology.md](docs/performance/methodology.md), reports if curated outputs changed | -| Report or benchmark-interpretation change | verify artifact links and factual consistency against raw evidence | [docs/performance/reports/](docs/performance/reports/), [docs/performance/interpretation-guide.md](docs/performance/interpretation-guide.md) | -| Repository metadata change (`.github/`, release/lint config, docs navigation) | validate rendered Markdown or YAML shape and the affected local workflow | [CONTRIBUTING.md](CONTRIBUTING.md), [README](README.md), [docs/index.md](docs/index.md) when contributor routes changed | +- `feat` +- `fix` +- `refactor` +- `perf` +- `docs` +- `test` +- `build` +- `ci` +- `chore` +- `revert` -### Documentation-specific checks +Rules: -For docs-only work, 
explicitly check: +- use lower-case type; +- use a meaningful scope when possible; +- keep the summary concise; +- prefer imperative mood; +- do not end the summary with a period. -- descriptive link text instead of raw relative-path labels; -- correct file and section names; -- benchmark family names that match the maintained suite vocabulary; -- chart image paths that still resolve from the current document; -- contributor navigation that still reaches the right document. +Examples: -### Benchmark-script checks +- `fix(pool): reject nil values on Put` +- `ci(github): simplify workflow branch targets` +- `docs(contributing): document protected main workflow` +- `test(pool): cover Put and Get lifecycle cases` -For script changes, validate the exact behavior you touched. Typical checks are: +The same header shape is expected for pull request titles because squash merges +commonly use the PR title as the final commit message. -- `--help` output; -- required vs optional flags; -- output artifact names; -- artifact directory layout; -- interaction with [paths.sh](bench/scripts/paths.sh); -- compatibility with the terminology in - [docs/performance/methodology.md](docs/performance/methodology.md). +## Pull request flow -## Performance and benchmark evidence +Normal flow: -### Exploratory vs repository-grade work +```text +topic branch -> PR into main -> merge -> optional release tag from main +``` -Use exploratory benchmark runs while shaping code, but do not present them as a -finished repository claim without context. +Pull requests must: -Repository-grade evidence usually means: +- target `main`; +- use a Conventional Commit-style title; +- describe the motivation; +- describe the change; +- list local checks run; +- mention documentation impact; +- mention release impact if relevant; +- link related issues if relevant. 
-- repeated raw outputs in [bench/raw/](bench/raw/); -- matching environment capture files; -- compare output in [bench/compare/](bench/compare/) when revisions are compared; -- charts in [bench/charts/](bench/charts/) only when they improve readability; -- a curated report in [docs/performance/reports/](docs/performance/reports/) - when the claim deserves preservation. +Do not open normal pull requests into `next`. Do not document or use a +`next -> main` promotion step for ordinary work. -### Artifact commit policy +Use the repository PR template: -The repository intentionally keeps most benchmark artifacts untracked by -default. See [.gitignore](.gitignore) and the performance docs for the current -policy. +- [.github/PULL_REQUEST_TEMPLATE.md](.github/PULL_REQUEST_TEMPLATE.md) -Practical rule: +## Local validation -- throwaway raw outputs, compare outputs, profiles, and generated charts - usually stay local; -- curated human-authored documentation and reports may be committed; -- performance claims in a PR should still reference the local artifacts used to - produce the conclusion. +Use the raw Go commands that match this repository: -### Canonical workflows +```bash +go test ./... +go test -race ./... +go vet ./... +``` -Current-state snapshot: +For benchmark smoke validation, use the lightweight repository command only +when the change affects benchmark code or benchmark tooling: ```bash -bench/scripts/capture_env.sh --output bench/raw/.env.txt -bench/scripts/run_benchmarks.sh --suite all --name --count 10 -python3 bench/scripts/plot_benchmarks.py --mode snapshot --input bench/raw/.txt +go test -run '^$' -bench . -benchtime=100ms ./... 
``` -Revision-to-revision comparison: +For documentation changes, also run the repository docs smoke script: ```bash -bench/scripts/capture_env.sh --output bench/raw/.env.txt -bench/scripts/run_benchmarks.sh --suite all --name --count 10 -bench/scripts/capture_env.sh --output bench/raw/.env.txt -bench/scripts/run_benchmarks.sh --suite all --name --count 10 - -bench/scripts/compare_benchmarks.sh \ - --old bench/raw/.txt \ - --new bench/raw/.txt \ - --name - -bench/scripts/compare_benchmarks.sh \ - --old bench/raw/.txt \ - --new bench/raw/.txt \ - --name \ - --format csv - -python3 bench/scripts/plot_benchmarks.py \ - --input bench/compare/.csv +python3 scripts/check_docs_smoke.py --repo-root . ``` -Focused profiling: +For benchmark or benchmark-script changes, also run the affected suite or +script locally. Typical repository commands include: ```bash -bench/scripts/profile_benchmarks.sh \ - --bench '^BenchmarkParallel_' \ - --name +go test ./internal/backend -run '^$' -bench '^BenchmarkSyncPool_' -benchmem -count 1 +go test . -run '^$' -bench '^BenchmarkPaths_' -benchmem -count 1 +bench/scripts/run_benchmarks.sh --help +bench/scripts/compare_benchmarks.sh --help +bench/scripts/profile_benchmarks.sh --help +python3 bench/scripts/plot_benchmarks.py --help ``` -For the authoritative workflow and vocabulary, use -[docs/performance/methodology.md](docs/performance/methodology.md). +If the change makes a performance claim, keep the evidence aligned with the +repository performance docs: -## Documentation synchronization rules +- [Performance overview](docs/performance/README.md) +- [Benchmark methodology](docs/performance/methodology.md) -### Update the right document, not every document +## GitHub Actions policy -Keep each document responsible for its own job: +The repository separates fast quality gates, PR-only dependency checks, +security scans, posture scans, and release-only workflows. 
-- [README](README.md) is the public landing page; -- [doc.go](doc.go) is the package-facing contract; -- [docs/lifecycle.md](docs/lifecycle.md) is the lifecycle authority; -- [docs/non-goals.md](docs/non-goals.md) is the scope-boundary authority; -- [docs/performance/README.md](docs/performance/README.md) and adjacent files - own the benchmark/report workflow. +### All branch pushes -Do not fix documentation drift by duplicating the same explanation everywhere. +Expected fast checks: -### Prefer semantic link labels +- `CI` +- `Lint` +- `Docs Smoke` +- `Benchmark Smoke` when it remains lightweight -Good: +### Pull requests into `main` -- [Lifecycle guide](docs/lifecycle.md) -- [Raw artifacts directory](bench/raw/) -- [Benchmark scripts directory](bench/scripts/) +Expected pull-request checks: -Avoid visible labels that expose raw path mechanics instead of meaning: +- `CI` +- `Lint` +- `Docs Smoke` +- `Benchmark Smoke` +- `Commit Lint` +- `Dependency Review` +- `Govulncheck` +- `CodeQL` -- ``docs/lifecycle.md`` as the only navigation cue; -- `../../../bench/raw/...` as the human-facing label. +### Pushes to `main` -### Keep names aligned with the codebase +Protected branch pushes are also expected to run: -When docs mention repository entities, keep them synchronized with the actual -files and vocabulary: +- `CI` +- `Lint` +- `Docs Smoke` +- `Benchmark Smoke` +- `Govulncheck` +- `CodeQL` -- benchmark family names; -- script names and flags; -- report file names; -- chart file names; -- directory names; -- issue or PR workflow labels. +### Tags -## Pull requests +Stable SemVer tags trigger: -Use focused pull requests with one logical change whenever possible. +- `Release` +- `Attest` -Good PR shapes: +### Schedule or manual dispatch -- one runtime fix; -- one documentation pass; -- one benchmark-family refinement; -- one tooling change; -- one report publication; -- one repository metadata change set. 
+Used for: -Avoid mixing unrelated runtime, docs, automation, and reporting work unless the -repository would be harder to review if split apart. +- `Govulncheck` +- `CodeQL` +- `Scorecards` +- manual reruns of release-only or smoke workflows when appropriate -When opening a PR: +`Scorecards` is repository-posture tooling, not a normal required pull-request +check. -- use the [PR template](.github/PULL_REQUEST_TEMPLATE.md); -- link the relevant issue or design proposal when one exists; -- name the affected contract or benchmark surface precisely; -- describe what is explicitly out of scope; -- include the exact validation you ran; -- attach performance evidence when the PR makes a performance claim. +## Required branch protection / rulesets -## Commit messages +These settings cannot be fully enforced from files alone. Maintainers should +configure them in GitHub. -Use concise Conventional Commit style messages with repository-meaningful -scopes. +### For `main` -Examples: +- require pull requests before merging; +- require required status checks; +- require branches to be up to date before merging if that is part of + repository policy; +- require conversation resolution if that is part of repository policy; +- disallow bypass where possible; +- disallow force pushes; +- disallow deletions; +- optionally require linear history. 
-- `fix(pool): preserve reuse denial before reset` -- `docs(lifecycle): clarify post-Put ownership boundary` -- `test(backend): cover sync.Pool miss path invariants` -- `docs(performance): update benchmark matrix naming` -- `refactor(bench): centralize artifact path handling` +### Recommended required checks -Avoid vague messages such as: +For `main`: -- `fix stuff` -- `update files` -- `cleanup` +- `test (ubuntu-latest)` +- `test (macos-latest)` +- `test (windows-latest)` +- `race-and-vet` +- `ci-summary` +- `golangci-lint` +- `docs-smoke` +- `benchmark-smoke` +- `commitlint` -## Reviewer expectations +`dependency-review`, `govulncheck`, and `CodeQL` may also be required if +maintainers want stricter security gating on `main`. -Maintainers review for: +Maintainers should also keep the GitHub default branch set to `main`. -- scope alignment; -- lifecycle correctness; -- ownership clarity; -- code and doc consistency; -- benchmark discipline; -- long-term maintainability. +## Release policy -A change may be rejected even when it "works" if it: +- Stable releases are created from SemVer tags on `main`. +- Release tags use `vX.Y.Z`. +- Examples: + - `v0.1.1` for bug fixes; + - `v0.2.0` for compatible new functionality; + - `v1.0.0` for the first stable API release. +- Stable release workflows must not publish from feature branches. +- Stable release workflows must not publish from an obsolete `next` branch. +- Stable tags must point to commits reachable from `main`. +- This repository does not use a permanent prerelease branch or a permanent + `next` prerelease flow. -- broadens the product without justification; -- weakens lifecycle clarity; -- hides important behavior behind abstraction; -- adds unsupported performance claims; -- leaves documentation more ambiguous than before. +If temporary release stabilization is needed later, maintainers may use a +branch such as `release/v0.2.0`. 
That is exceptional and should not replace +the normal `topic branch -> PR into main -> tag from main` flow. -## Security and provenance +## Dependabot policy -Do not open a public bug report first if the issue may be security-sensitive. -Use the repository's private vulnerability reporting route instead of the -public issue forms. +- Dependabot version update pull requests should target `main`. +- Dependency update pull requests must pass the same checks as ordinary changes. +- Dependency update pull requests must not bypass protected-branch policy. +- Security update routing can still depend on GitHub repository settings + outside this repository. -For this package, security-relevant issues can include: +## Documentation and benchmark expectations -- stale data exposure through reuse; -- ownership confusion after `Put`; -- concurrency misuse caused by incorrect runtime behavior; -- documentation that could lead callers to unsafe assumptions. +Documentation is part of repository quality. If a change affects lifecycle, +ownership, benchmark workflow, or repository navigation, update the relevant +docs in the same change. -If you copy or adapt third-party material: +Start with: -- state that explicitly in the PR; -- preserve attribution when required; -- make sure the license is compatible; -- do not paste external code or text silently. +- [README](README.md) +- [Docs index](docs/index.md) +- [Lifecycle guide](docs/lifecycle.md) +- [Architecture guide](docs/architecture.md) +- [Performance overview](docs/performance/README.md) -## Getting help +Performance claims should be backed by repository evidence, not only by a +single local number. Use the maintained benchmark workflow and keep raw +artifacts, comparison outputs, and reports aligned with the documentation. -When you are unsure where to start: +## Security reporting -1. read [README](README.md) and [Docs index](docs/index.md); -2. read the narrow authority document for the area you are changing; -3. 
choose the smallest issue route from [Issue forms](.github/ISSUE_TEMPLATE/); -4. open a focused PR using the [PR template](.github/PULL_REQUEST_TEMPLATE.md). +Do not open a public issue first for a potentially security-sensitive problem. +Use the repository security reporting path described in: + +- [SECURITY.md](SECURITY.md) + +For this repository, security-relevant issues can include: + +- stale data retention across pooled reuse; +- ownership confusion after `Put`; +- concurrency misuse caused by incorrect runtime behavior or guidance; +- documentation that could lead users to unsafe assumptions. ## Summary A strong contribution to `arcoris.dev/pool` is: -- small in scope; -- explicit about behavior and limits; -- validated proportionally to the change; -- synchronized with the relevant documentation; -- and, for performance work, supported by real repository evidence. +- based on current `main`; +- small and typed clearly; +- validated locally before review; +- opened as a pull request into the protected branch; +- documented when behavior or workflow changes; +- and released only from SemVer tags on `main`. 
From 7ed8c4ffd847c66c0bdc9bc6daab792013e78cb3 Mon Sep 17 00:00:00 2001 From: Anton Bochkov Date: Fri, 24 Apr 2026 12:15:41 +0300 Subject: [PATCH 6/6] chore(lint): fix benchmark and test style issues --- internal/backend/syncpool_benchmark_test.go | 7 +++---- internal/backend/syncpool_test.go | 11 +++++++---- options_test.go | 2 +- pool_baseline_benchmark_test.go | 12 ++++++------ pool_metrics_benchmark_test.go | 6 +++--- pool_paths_benchmark_test.go | 12 ++++++------ pool_shapes_benchmark_test.go | 6 +++--- 7 files changed, 29 insertions(+), 27 deletions(-) diff --git a/internal/backend/syncpool_benchmark_test.go b/internal/backend/syncpool_benchmark_test.go index eb49707..2e3a4d2 100644 --- a/internal/backend/syncpool_benchmark_test.go +++ b/internal/backend/syncpool_benchmark_test.go @@ -83,7 +83,6 @@ func putSyncPoolBenchmarkValue( p *SyncPool[syncPoolBenchmarkValue], v syncPoolBenchmarkValue, ) { - //nolint:staticcheck // This benchmark intentionally measures the by-value backend contrast path. 
p.Put(v) } @@ -111,7 +110,7 @@ func BenchmarkSyncPool_GetMiss(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v.ID = i syncPoolBenchmarkPointerSink = v @@ -149,7 +148,7 @@ func BenchmarkSyncPool_ControlledGetPut_Pointer(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v.ID = i v.Data[0] = byte(i) @@ -188,7 +187,7 @@ func BenchmarkSyncPool_ControlledGetPut_Value(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v.A = uint64(i) v.B = uint64(i * 2) diff --git a/internal/backend/syncpool_test.go b/internal/backend/syncpool_test.go index 9a6791d..6e374cd 100644 --- a/internal/backend/syncpool_test.go +++ b/internal/backend/syncpool_test.go @@ -134,7 +134,10 @@ func TestSyncPoolTypedNilHandling(t *testing.T) { switch { case got == nil: if calls != 0 { - t.Fatalf("constructor call count after nil pointer round-trip = %d, want 0 when stored nil was reused", calls) + t.Fatalf( + "constructor call count after nil pointer round-trip = %d, want 0 when stored nil was reused", + calls, + ) } case got.ID == 1: if calls != 1 { @@ -265,12 +268,12 @@ func assertValueGetAfterPut( ) { t.Helper() - switch { - case got == stored: + switch got { + case stored: if constructorCalls != 0 { t.Fatalf("constructor call count after value reuse = %d, want 0", constructorCalls) } - case got == (syncPoolTestObject{ID: 1}): + case syncPoolTestObject{ID: 1}: if constructorCalls != 1 { t.Fatalf("constructor call count after value fallback construction = %d, want 1", constructorCalls) } diff --git a/options_test.go b/options_test.go index adb4f44..7bedd9e 100644 --- a/options_test.go +++ b/options_test.go @@ -289,7 +289,7 @@ func TestDefaultPolicies(t *testing.T) { } }) - t.Run("noopDrop accepts pointer and value inputs", func(t *testing.T) { + t.Run("noopDrop accepts pointer and value inputs", func(_ *testing.T) 
{ // noopDrop has no observable state change; the contract is simply that it // accepts any T without panicking. noopDrop(&optionsTestObject{count: 1}) diff --git a/pool_baseline_benchmark_test.go b/pool_baseline_benchmark_test.go index 0de5b44..84cb1a0 100644 --- a/pool_baseline_benchmark_test.go +++ b/pool_baseline_benchmark_test.go @@ -144,7 +144,7 @@ func benchmarkBaselineAllocOnlyPointer(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { news++ v := newBaselinePointer() exerciseBaselinePointer(v, i) @@ -179,7 +179,7 @@ func benchmarkBaselineControlledRawSyncPoolPointer(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get().(*baselinePointer) exerciseBaselinePointer(v, i) baselinePointerSink = v @@ -212,7 +212,7 @@ func benchmarkBaselineControlledPoolPointer(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() exerciseBaselinePointer(v, i) baselinePointerSink = v @@ -232,7 +232,7 @@ func benchmarkBaselineAllocOnlyValue(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { news++ baselineValueSink = exerciseBaselineValue(baselineValue{}, i) } @@ -264,7 +264,7 @@ func benchmarkBaselineControlledRawSyncPoolValue(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get().(baselineValue) v = exerciseBaselineValue(v, i) baselineValueSink = v @@ -295,7 +295,7 @@ func benchmarkBaselineControlledPoolValue(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v = exerciseBaselineValue(v, i) baselineValueSink = v diff --git a/pool_metrics_benchmark_test.go b/pool_metrics_benchmark_test.go index 4683a6c..71ea676 100644 --- a/pool_metrics_benchmark_test.go +++ b/pool_metrics_benchmark_test.go @@ -66,7 +66,7 @@ func 
benchmarkMetricsControlledAcceptedWarmPath(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v.Counter = i v.Payload = testutil.AppendSamplePayload(v.Payload[:0]) @@ -114,7 +114,7 @@ func benchmarkMetricsRealisticRejectedSteadyState(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v.Counter = i v.Payload = testutil.AppendSamplePayload(v.Payload[:0]) @@ -170,7 +170,7 @@ func benchmarkMetricsRealisticMixedReuse(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v.Counter = i if i%8 == 0 { diff --git a/pool_paths_benchmark_test.go b/pool_paths_benchmark_test.go index 8a5255e..b57f2a2 100644 --- a/pool_paths_benchmark_test.go +++ b/pool_paths_benchmark_test.go @@ -197,7 +197,7 @@ func benchmarkPathsControlledAccepted(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() useAcceptedPath(v, i) pathAcceptedSink = v @@ -234,7 +234,7 @@ func benchmarkPathsRealisticAccepted(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() useAcceptedPath(v, i) pathAcceptedSink = v @@ -271,7 +271,7 @@ func benchmarkPathsRealisticRejected(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() useAcceptedPath(v, i) p.Put(v) @@ -302,7 +302,7 @@ func benchmarkPathsControlledResetHeavy(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() useHeavyResetPath(v, i) pathHeavyResetSink = v @@ -335,7 +335,7 @@ func benchmarkPathsRealisticDropObserved(b *testing.B) { }, OnDrop: func(v *pathAccepted) { drops++ - for i := 0; i < len(v.Scratch); i++ { + for i := range len(v.Scratch) { dropWork += uint64(v.Scratch[i]) } pathDropSink = v @@ -345,7 +345,7 @@ func 
benchmarkPathsRealisticDropObserved(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() useAcceptedPath(v, i) // Make the drop callback do measurable work on the accumulated state. diff --git a/pool_shapes_benchmark_test.go b/pool_shapes_benchmark_test.go index f43f4b4..ae5055a 100644 --- a/pool_shapes_benchmark_test.go +++ b/pool_shapes_benchmark_test.go @@ -121,7 +121,7 @@ func benchmarkAcceptedShape[T any]( b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := range b.N { v := p.Get() v = use(v, i) store(v) @@ -182,7 +182,7 @@ func benchmarkControlledShapePointerWithSlices(b *testing.B) { }, func(v *shapePointerWithSlices, i int) *shapePointerWithSlices { v.Bytes = append(v.Bytes[:0], byte(i), byte(i>>8), byte(i>>16), byte(i>>24)) - for j := 0; j < 16; j++ { + for j := range 16 { v.Tokens = append(v.Tokens, shapeToken{Kind: j, Value: i + j}) } return v @@ -307,7 +307,7 @@ func benchmarkShapeAlwaysOversizedRejected(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { v := p.Get() // This benchmark is intentionally "always oversized", not mixed-size. // Every iteration crosses the reuse threshold before Put.