Verify Reaper state to create new or return existing instance #782

Merged
6 changes: 3 additions & 3 deletions docker.go
@@ -984,7 +984,7 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest)
 	// the reaper does not need to start a reaper for itself
 	isReaperContainer := strings.EqualFold(req.Image, reaperImage(reaperOpts.ImageName))
 	if !req.SkipReaper && !isReaperContainer {
-		r, err := newReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), sessionID.String(), p, req.ReaperOptions...)
+		r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), sessionID.String(), p, req.ReaperOptions...)
 		if err != nil {
 			return nil, fmt.Errorf("%w: creating reaper failed", err)
 		}
@@ -1201,7 +1201,7 @@ func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest)
 	sessionID := sessionID()
 	var termSignal chan bool
 	if !req.SkipReaper {
-		r, err := newReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), sessionID.String(), p, req.ReaperOptions...)
+		r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), sessionID.String(), p, req.ReaperOptions...)
 		if err != nil {
 			return nil, fmt.Errorf("%w: creating reaper failed", err)
 		}
@@ -1356,7 +1356,7 @@ func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest)
 	var termSignal chan bool
 	if !req.SkipReaper {
 		sessionID := sessionID()
-		r, err := newReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), sessionID.String(), p, req.ReaperOptions...)
+		r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), sessionID.String(), p, req.ReaperOptions...)
 		if err != nil {
 			return nil, fmt.Errorf("%w: creating network reaper failed", err)
 		}
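All three call sites above make the same change: instead of unconditionally constructing a reaper with newReaper, they go through reuseOrCreateReaper. Distilled to its shape, the pattern is a mutex-guarded singleton that is re-validated, not just nil-checked, before being reused. The sketch below uses illustrative names (Worker, newWorker, alive); the real implementation follows in reaper.go:

package main

import (
	"context"
	"fmt"
	"sync"
)

// Worker stands in for the Reaper; alive stands in for the Docker state check.
type Worker struct{ closed bool }

func (w *Worker) alive(ctx context.Context) bool { return !w.closed }

func newWorker(ctx context.Context) (*Worker, error) { return &Worker{}, nil }

var (
	mu       sync.Mutex
	instance *Worker
)

// reuseOrCreate mirrors the shape of reuseOrCreateReaper: take the lock,
// re-validate the cached instance, and only construct a new one if needed.
func reuseOrCreate(ctx context.Context) (*Worker, error) {
	mu.Lock()
	defer mu.Unlock()
	if instance != nil && instance.alive(ctx) {
		return instance, nil
	}
	w, err := newWorker(ctx)
	if err != nil {
		return nil, err
	}
	instance = w
	return instance, nil
}

func main() {
	ctx := context.Background()
	a, _ := reuseOrCreate(ctx)
	b, _ := reuseOrCreate(ctx)
	fmt.Println(a == b) // true: the second call reuses the cached instance
}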
39 changes: 29 additions & 10 deletions reaper.go
@@ -24,8 +24,8 @@ const (
 )
 
 var (
-	reaper *Reaper // We would like to create reaper only once
-	mutex  sync.Mutex
+	reaperInstance *Reaper // We would like to create reaper only once
+	mutex          sync.Mutex
 )
 
 // ReaperProvider represents a provider for the reaper to run itself with
@@ -38,22 +38,39 @@ type ReaperProvider interface {
 // NewReaper creates a Reaper with a sessionID to identify containers and a provider to use
 // Deprecated: it's not possible to create a reaper anymore.
 func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, reaperImageName string) (*Reaper, error) {
-	return newReaper(ctx, sessionID, provider, WithImageName(reaperImageName))
+	return reuseOrCreateReaper(ctx, sessionID, provider, WithImageName(reaperImageName))
 }
 
-// newReaper creates a Reaper with a sessionID to identify containers and a provider to use
-func newReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) {
+// reuseOrCreateReaper returns the existing Reaper instance if it exists and is running; otherwise it creates
+// a new Reaper with a sessionID to identify containers and a provider to use
+func reuseOrCreateReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) {
 	mutex.Lock()
 	defer mutex.Unlock()
-	// If reaper already exists re-use it
-	if reaper != nil {
-		return reaper, nil
+	// If the reaper already exists and is healthy, re-use it
+	if reaperInstance != nil {
+		// Verify this instance is still running by checking its state.
+		// Can't use Container.IsRunning because that bool is not updated when the Reaper is terminated
Review thread on the comment above:

Collaborator: Meanwhile the CI runs (the tests will pass): can you elaborate a bit on this comment? Do you think that bool should be updated as a consequence, probably here or in a follow-up?

Contributor (author): Container.IsRunning is simply a boolean flag (`isRunning bool`) that gets updated when a container is stopped or terminated (`c.isRunning = false`). And while the Reaper is a "regular" container, it is never terminated via the Stop() or Terminate() methods; instead, it terminates by itself once the connection to it is closed (`case c.terminationSignal <- true:`). That makes IsRunning() unreliable for the Reaper container, as it would never return false.

That's why I chose to poll Docker and get the state from there, which should be reliable.

What I think would be better is if the IsRunning() method (`func (c *DockerContainer) IsRunning() bool`) were updated to do what I'm doing below: ask Docker whether the state is running or not. That would make it reliable instead of depending on an internally updated boolean flag. I did not want to touch that code in this PR because I don't know the history behind it. Perhaps IsRunning is checked regularly, and that would be relatively heavy on Docker calls?

Collaborator: I see, thanks for such a clear explanation; now I see what you mean.

I think checking the state from Docker would be more accurate than what we have now, and I understand your concerns about performance if that method is called many times. I've checked its usages and there are just a few of them at the moment (I'm particularly worried about the GenericContainer function calling it).

Adding that code could also lead to a situation where multiple goroutines in tests try to read the container state at the same time, and one sees the container as not running while the next sees it as running, which could be worse.

I'd keep this as simple as in your submission, and evaluate later whether we need to check the state more frequently.
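For reference, the follow-up discussed above would look roughly like the sketch below, written as if inside this package (hypothetical, not part of this PR; note it would change the current IsRunning() signature by adding a context parameter):

// Hypothetical follow-up (not in this PR): derive IsRunning from the Docker
// state instead of the internally cached isRunning flag.
func (c *DockerContainer) IsRunning(ctx context.Context) bool {
	state, err := c.State(ctx)
	if err != nil {
		return false // conservatively treat "cannot inspect" as not running
	}
	return state.Running
}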

+		state, err := reaperInstance.container.State(ctx)
+		if err == nil && state.Running {
+			return reaperInstance, nil
+		}
+	}
+
+	r, err := newReaper(ctx, sessionID, provider, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	reaperInstance = r
+	return reaperInstance, nil
 }

+// newReaper creates a Reaper with a sessionID to identify containers and a provider to use.
+// It should only be used internally; prefer reuseOrCreateReaper, which reuses an existing Reaper instance when possible.
+func newReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) {
 	dockerHost := testcontainersdocker.ExtractDockerHost(ctx)
 
-	// Otherwise create a new one
-	reaper = &Reaper{
+	reaper := &Reaper{
 		Provider:  provider,
 		SessionID: sessionID,
 	}
@@ -104,6 +121,7 @@ func newReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) {
 	if err != nil {
 		return nil, err
 	}
+	reaper.container = c
 
 	endpoint, err := c.PortEndpoint(ctx, "8080", "")
 	if err != nil {
@@ -119,6 +137,7 @@ type Reaper struct {
 	Provider  ReaperProvider
 	SessionID string
 	Endpoint  string
+	container Container
 }
 
 // Connect runs a goroutine which can be terminated by sending true into the returned channel
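Because the Reaper is never stopped through Stop() or Terminate() (see the review thread above), its lifetime is tied to the channel returned by Connect. A hedged usage sketch, written as if inside this package; withReaper is a hypothetical helper, not part of the library:

// withReaper illustrates the lifecycle implied by the Connect doc comment:
// Connect starts a goroutine and returns a channel, and sending true into it
// terminates that goroutine, which lets the reaper container shut itself
// down once its connection goes away.
func withReaper(r *Reaper, run func() error) error {
	termSignal, err := r.Connect()
	if err != nil {
		return err
	}
	defer func() { termSignal <- true }()
	return run()
}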
31 changes: 25 additions & 6 deletions reaper_test.go
@@ -57,8 +57,6 @@ func createContainerRequest(customize func(ContainerRequest) ContainerRequest) ContainerRequest {
 }
 
 func Test_NewReaper(t *testing.T) {
-	defer func() { reaper = nil }()
-
 	type cases struct {
 		name string
 		req  ContainerRequest
@@ -105,8 +103,6 @@ func Test_NewReaper(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			// make sure we re-initialize the singleton
-			reaper = nil
 			provider := &mockReaperProvider{
 				config: test.config,
 			}
@@ -125,8 +121,6 @@
 }
 
 func Test_ReaperForNetwork(t *testing.T) {
-	defer func() { reaper = nil }()
-
 	ctx := context.Background()
 
 	networkName := "test-network-with-custom-reaper"
@@ -153,3 +147,28 @@ func Test_ReaperForNetwork(t *testing.T) {
 	assert.Equal(t, "reaperImage", provider.req.Image)
 	assert.Equal(t, "reaperImage", provider.req.ReaperImage)
 }
+
+func Test_ReaperReusedIfHealthy(t *testing.T) {
+	SkipIfProviderIsNotHealthy(t)
+
+	ctx := context.Background()
+	// As other integration tests run with the (shared) Reaper as well, re-use the instance to not interrupt other tests
+	wasReaperRunning := reaperInstance != nil
+
+	provider, _ := ProviderDocker.GetProvider()
+	reaper, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, provider.(*DockerProvider).host), "sessionId", provider)
+	assert.NoError(t, err, "creating the Reaper should not error")
+
+	reaperReused, _ := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, provider.(*DockerProvider).host), "sessionId", provider)
+	assert.Same(t, reaper, reaperReused, "expecting the same reaper instance to be returned if running and healthy")
+
+	terminate, err := reaper.Connect()
+	assert.NoError(t, err, "connecting to the Reaper should be successful")
+	defer func(term chan bool) {
+		term <- true
+	}(terminate)
+
+	if !wasReaperRunning {
+		terminateContainerOnEnd(t, ctx, reaper.container)
+	}
+}