Skip to content

Commit

Permalink
go/store/datas/pull: pull_chunk_fetcher: Move chunk fetching to a str…
Browse files Browse the repository at this point in the history
…eaming interface instead of batch.

We want to better pipeline I/O when pulling from a remote, and moving to a
streaming interface where storage can see more of the addresses that are needed
at once will allow us to achieve it.

For now, we implement the streaming interface just by calling the existing
batch get interface.
  • Loading branch information
reltuk committed Mar 25, 2024
1 parent 676c7e9 commit b05d3d9
Show file tree
Hide file tree
Showing 5 changed files with 440 additions and 111 deletions.
174 changes: 174 additions & 0 deletions go/store/datas/pull/pull_chunk_fetcher.go
@@ -0,0 +1,174 @@
// Copyright 2024 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pull

import (
"context"
"io"
"sync"

"golang.org/x/sync/errgroup"

"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/nbs"
)

// GetManyer is the narrow slice of store behavior the fetcher depends on: a
// batch lookup which invokes |found| once for each requested chunk that is
// present in the store. Hashes absent from the store are simply not reported.
type GetManyer interface {
	GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, nbs.CompressedChunk)) error
}

// A ChunkFetcher is a batching, stateful, potentially concurrent interface to
// fetch lots of chunks from a ChunkStore. A caller is expected to call
// `Get()` and `Recv()` concurrently. Unless there is an error, for every
// single Hash passed to Get, a corresponding Recv() call will deliver the
// contents of the chunk. When a caller is done with a ChunkFetcher, they
// should call |CloseSend()|. After CloseSend, once all requested hashes have
// been delivered through Recv(), Recv() will return `io.EOF`.
//
// A ChunkFetcher should be Closed() when it is no longer needed. In non-error
// cases, this will typically be after Recv() has delivered io.EOF. If Close
// is called before Recv() delivers io.EOF, there is no guarantee that all
// requested chunks will be delivered through Recv().
//
// In contrast to interfaces like GetManyCompressed on ChunkStore, if the
// chunk is not in the underlying database, then Recv() will return an
// nbs.CompressedChunk with its Hash set, but with empty contents.
//
// Other than an io.EOF from Recv(), any |error| returned from any method
// indicates an underlying problem with fetching requested chunks from the
// ChunkStore. A ChunkFetcher is single use and cannot be used effectively
// after an error is returned.
type ChunkFetcher interface {
	// Get requests delivery, through Recv, of the chunks with the given
	// addresses.
	Get(ctx context.Context, hashes hash.HashSet) error

	// CloseSend signals that no further Get calls will be made.
	CloseSend() error

	// Recv returns the next fetched chunk, or io.EOF once CloseSend has
	// been called and all requested chunks have been delivered.
	Recv(context.Context) (nbs.CompressedChunk, error)

	// Close releases resources associated with this fetcher.
	Close() error
}

// A PullChunkFetcher is a simple implementation of |ChunkFetcher| based on
// calling GetManyCompressed.
//
// It only has one outstanding GetManyCompressed call at a time.
type PullChunkFetcher struct {
	// ctx is the errgroup-derived lifetime context created in
	// NewPullChunkFetcher; it is canceled when the fetcher goroutine
	// returns an error.
	ctx context.Context
	eg  *errgroup.Group

	// getter performs the actual batched chunk lookups.
	getter GetManyer

	// batchCh carries requested hash sets from Get to the fetcher
	// goroutine; closed by CloseSend.
	batchCh chan hash.HashSet
	// doneCh is closed by Close to shut down the fetcher goroutine.
	doneCh chan struct{}
	// resCh delivers fetched chunks to Recv; closed by the fetcher
	// goroutine once batchCh is closed and drained, which makes Recv
	// return io.EOF.
	resCh chan nbs.CompressedChunk
}

// NewPullChunkFetcher returns a PullChunkFetcher which reads chunks from
// |getter|. It starts a single background goroutine, bounded by |ctx|, that
// services batches submitted through Get until CloseSend or Close is called.
func NewPullChunkFetcher(ctx context.Context, getter GetManyer) *PullChunkFetcher {
	eg, egCtx := errgroup.WithContext(ctx)
	f := &PullChunkFetcher{
		ctx:     egCtx,
		eg:      eg,
		getter:  getter,
		batchCh: make(chan hash.HashSet),
		doneCh:  make(chan struct{}),
		resCh:   make(chan nbs.CompressedChunk),
	}
	f.eg.Go(func() error {
		// Closing resCh on clean shutdown is what lets Recv observe io.EOF.
		return f.fetcherThread(func() { close(f.resCh) })
	})
	return f
}

// fetcherThread is the body of the single background goroutine. It receives
// batches from batchCh, fetches them with one blocking GetManyCompressed call
// at a time, and forwards each chunk to resCh. Hashes the store did not
// report are delivered as nbs.CompressedChunk values with only |H| set.
// |finalize| is invoked exactly once, on clean shutdown after batchCh is
// closed (it closes resCh so Recv returns io.EOF).
func (f *PullChunkFetcher) fetcherThread(finalize func()) error {
	for {
		select {
		case batch, ok := <-f.batchCh:
			if !ok {
				// CloseSend closed batchCh: clean shutdown.
				finalize()
				return nil
			}

			// |missing| tracks which hashes of this batch the store never
			// reported; the mutex guards it because the found callback may
			// be invoked concurrently by the underlying store.
			var mu sync.Mutex
			missing := batch.Copy()

			// Blocking get, no concurrency, only one fetcher.
			err := f.getter.GetManyCompressed(f.ctx, batch, func(ctx context.Context, chk nbs.CompressedChunk) {
				mu.Lock()
				missing.Remove(chk.H)
				mu.Unlock()
				// Forward the chunk, unless either context is canceled or
				// Close has been called; in those cases the chunk is dropped
				// and the outer loop will wind down.
				select {
				case <-ctx.Done():
				case <-f.ctx.Done():
				case f.resCh <- chk:
				case <-f.doneCh:
				}
			})
			if err != nil {
				return err
			}

			// Deliver an empty-content chunk for every requested hash the
			// store did not report, per the ChunkFetcher contract.
			for h := range missing {
				select {
				case <-f.ctx.Done():
					return context.Cause(f.ctx)
				case f.resCh <- nbs.CompressedChunk{H: h}:
				case <-f.doneCh:
					return nil
				}
			}
		case <-f.ctx.Done():
			// Lifetime context canceled (e.g. another errgroup member failed).
			return context.Cause(f.ctx)
		case <-f.doneCh:
			// Close was called.
			return nil
		}
	}
}

// Get submits |hashes| as one batch to the background fetcher goroutine. It
// blocks until the batch is accepted, |ctx| is canceled, or the fetcher's own
// lifetime context is canceled (e.g. after an underlying fetch error).
func (f *PullChunkFetcher) Get(ctx context.Context, hashes hash.HashSet) error {
	select {
	case <-ctx.Done():
		return context.Cause(ctx)
	case <-f.ctx.Done():
		return context.Cause(f.ctx)
	case f.batchCh <- hashes:
		return nil
	}
}

// CloseSend signals that no further batches will be submitted. The background
// goroutine closes resCh in response, so Recv returns io.EOF once all
// outstanding chunks have been delivered.
//
// CloseSend must be called at most once and must not race with Get: a Get
// after CloseSend would send on the closed channel and panic.
func (f *PullChunkFetcher) CloseSend() error {
	close(f.batchCh)
	return nil
}

// Close shuts down the background fetcher goroutine and returns any error it
// encountered. Close must be called exactly once; a second call would close
// the already-closed doneCh and panic.
func (f *PullChunkFetcher) Close() error {
	close(f.doneCh)
	return f.eg.Wait()
}

// Recv returns the next delivered chunk. After CloseSend, once every
// requested chunk has been delivered, Recv returns io.EOF. It returns a
// context error if |ctx| or the fetcher's lifetime context is canceled first.
func (f *PullChunkFetcher) Recv(ctx context.Context) (nbs.CompressedChunk, error) {
	var none nbs.CompressedChunk
	select {
	case <-ctx.Done():
		return none, context.Cause(ctx)
	case <-f.ctx.Done():
		return none, context.Cause(f.ctx)
	case chk, open := <-f.resCh:
		if !open {
			// resCh was closed by the fetcher goroutine's finalize callback.
			return none, io.EOF
		}
		return chk, nil
	}
}
170 changes: 170 additions & 0 deletions go/store/datas/pull/pull_chunk_fetcher_test.go
@@ -0,0 +1,170 @@
// Copyright 2024 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pull

import (
"context"
"fmt"
"io"
"sync"
"testing"

"github.com/stretchr/testify/assert"

"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/nbs"
)

// TestPullChunkFetcher exercises the PullChunkFetcher lifecycle: clean
// shutdown, context cancellation on both Get and Recv, delivery of present
// and absent chunks, error propagation from the underlying getter, and Get
// after Close.
func TestPullChunkFetcher(t *testing.T) {
	t.Run("ImmediateCloseSend", func(t *testing.T) {
		// CloseSend with no Gets: Recv sees io.EOF immediately.
		f := NewPullChunkFetcher(context.Background(), emptyGetManyer{})
		assert.NoError(t, f.CloseSend())
		_, err := f.Recv(context.Background())
		assert.ErrorIs(t, err, io.EOF)
		assert.NoError(t, f.Close())
	})
	t.Run("CanceledGetCtx", func(t *testing.T) {
		ctx, c := context.WithCancel(context.Background())
		gm := blockingGetManyer{make(chan struct{})}
		f := NewPullChunkFetcher(context.Background(), gm)
		hs := make(hash.HashSet)
		var h hash.Hash
		hs.Insert(h)
		// First Get is accepted; the fetcher thread then blocks in the
		// getter, so the second Get must fail via the canceled ctx.
		err := f.Get(ctx, hs)
		assert.NoError(t, err)
		c()
		err = f.Get(ctx, hs)
		assert.Error(t, err)
		close(gm.block)
		assert.NoError(t, f.Close())
	})
	t.Run("CanceledRecvCtx", func(t *testing.T) {
		ctx, c := context.WithCancel(context.Background())
		f := NewPullChunkFetcher(context.Background(), emptyGetManyer{})
		c()
		_, err := f.Recv(ctx)
		assert.Error(t, err)
		assert.NoError(t, f.Close())
	})
	// NOTE: subtest name typo fixed ("Delievered" -> "Delivered").
	t.Run("ReturnsDeliveredChunk", func(t *testing.T) {
		var gm deliveringGetManyer
		gm.C.FullCompressedChunk = make([]byte, 1024)
		f := NewPullChunkFetcher(context.Background(), gm)
		hs := make(hash.HashSet)
		hs.Insert(gm.C.H)
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			cmp, err := f.Recv(context.Background())
			assert.NoError(t, err)
			assert.Equal(t, cmp.H, gm.C.H)
			assert.Equal(t, cmp.FullCompressedChunk, gm.C.FullCompressedChunk)
			_, err = f.Recv(context.Background())
			assert.ErrorIs(t, err, io.EOF)
			assert.NoError(t, f.Close())
		}()
		err := f.Get(context.Background(), hs)
		assert.NoError(t, err)
		assert.NoError(t, f.CloseSend())
		wg.Wait()
	})
	t.Run("ReturnsEmptyCompressedChunk", func(t *testing.T) {
		// A hash the store does not have comes back with H set and nil
		// contents, per the ChunkFetcher contract.
		f := NewPullChunkFetcher(context.Background(), emptyGetManyer{})
		hs := make(hash.HashSet)
		var h hash.Hash
		hs.Insert(h)
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			cmp, err := f.Recv(context.Background())
			assert.NoError(t, err)
			assert.Equal(t, cmp.H, h)
			assert.Nil(t, cmp.FullCompressedChunk)
			_, err = f.Recv(context.Background())
			assert.ErrorIs(t, err, io.EOF)
			assert.NoError(t, f.Close())
		}()
		err := f.Get(context.Background(), hs)
		assert.NoError(t, err)
		assert.NoError(t, f.CloseSend())
		wg.Wait()
	})
	t.Run("ErrorGetManyer", func(t *testing.T) {
		// A getter error surfaces through Recv and Close, and poisons
		// further Gets.
		f := NewPullChunkFetcher(context.Background(), errorGetManyer{})
		hs := make(hash.HashSet)
		var h hash.Hash
		hs.Insert(h)
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, err := f.Recv(context.Background())
			assert.Error(t, err)
			err = f.Close()
			assert.Error(t, err)
		}()
		err := f.Get(context.Background(), hs)
		assert.NoError(t, err)
		err = f.Get(context.Background(), hs)
		assert.Error(t, err)
		wg.Wait()
	})
	t.Run("ClosedFetcherErrorsGet", func(t *testing.T) {
		f := NewPullChunkFetcher(context.Background(), emptyGetManyer{})
		assert.NoError(t, f.Close())
		hs := make(hash.HashSet)
		var h hash.Hash
		hs.Insert(h)
		assert.Error(t, f.Get(context.Background(), hs))
	})
}

// emptyGetManyer is a GetManyer holding no chunks: it never invokes |found|,
// so every requested hash is reported missing.
type emptyGetManyer struct {
}

func (emptyGetManyer) GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, nbs.CompressedChunk)) error {
	return nil
}

// deliveringGetManyer is a GetManyer which delivers its fixed chunk |C| once
// for every requested hash, regardless of the hash's value.
type deliveringGetManyer struct {
	C nbs.CompressedChunk
}

func (d deliveringGetManyer) GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, nbs.CompressedChunk)) error {
	// `for range` without the blank identifier is the idiomatic form when
	// only the iteration count matters (`for _ = range` is flagged by vet).
	for range hashes {
		found(ctx, d.C)
	}
	return nil
}

// blockingGetManyer is a GetManyer which blocks until its |block| channel is
// closed, then returns having delivered nothing. Tests use it to hold the
// fetcher thread inside a GetManyCompressed call.
type blockingGetManyer struct {
	block chan struct{}
}

func (b blockingGetManyer) GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, nbs.CompressedChunk)) error {
	<-b.block
	return nil
}

// errorGetManyer is a GetManyer whose GetManyCompressed always fails.
type errorGetManyer struct {
}

// errGetManyer follows the Go convention for unexported sentinel error
// variables (errXxx; staticcheck ST1012). It is only referenced within this
// type's method.
var errGetManyer = fmt.Errorf("always return an error")

func (errorGetManyer) GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, nbs.CompressedChunk)) error {
	return errGetManyer
}

0 comments on commit b05d3d9

Please sign in to comment.