chore: do not couple implementation with the ollama CLI
mdelapenya committed Mar 4, 2024
1 parent e2fdda2 commit 3431940
Showing 4 changed files with 124 additions and 132 deletions.
30 changes: 16 additions & 14 deletions docs/modules/ollama.md
@@ -42,20 +42,6 @@ for Ollama. E.g. `testcontainers.WithImage("ollama/ollama:0.1.25")`.

{% include "../features/common_functional_options.md" %}

#### With Models

It's possible to initialise the Ollama container with a specific model passed as parameter. The supported models are described in the Ollama project: [https://github.com/ollama/ollama?tab=readme-ov-file](https://github.com/ollama/ollama?tab=readme-ov-file) and [https://ollama.com/library](https://ollama.com/library).
!!!warning
At the moment you use one of those models, the Ollama image will load the model and could take longer to start because of that.
The following examples use the `llama2` model to connect to the Ollama container using HTTP and Langchain.
<!--codeinclude-->
[Using HTTP](../../modules/ollama/examples_test.go) inside_block:withHTTPModelLlama2
[Using Langchaingo](../../modules/ollama/examples_test.go) inside_block:withLangchainModelLlama2
<!--/codeinclude-->
### Container Methods

The Ollama container exposes the following methods:
@@ -77,3 +63,19 @@ and eventually use it as the base image for a new container. That will speed up
<!--codeinclude-->
[Commit Ollama image](../../modules/ollama/ollama_test.go) inside_block:commitOllamaContainer
<!--/codeinclude-->
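
For orientation, here is a minimal sketch of that commit-and-reuse flow in plain Go, based on the test included in this change. The image tag, model name and error handling are illustrative, not prescriptive:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/google/uuid"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/modules/ollama"
)

func main() {
	ctx := context.Background()

	// Start a plain Ollama container and pull a small model into it.
	c, err := ollama.RunContainer(ctx, testcontainers.WithImage("ollama/ollama:0.1.25"))
	if err != nil {
		log.Fatalf("failed to start container: %s", err)
	}
	defer func() { _ = c.Terminate(ctx) }()

	if _, _, err := c.Exec(ctx, []string{"ollama", "pull", "all-minilm"}); err != nil {
		log.Fatalf("failed to pull model: %s", err)
	}

	// Commit the running container to a uniquely named local image that
	// already contains the model, so later runs skip the download.
	targetImage := fmt.Sprintf("%s-%s", ollama.DefaultOllamaImage, strings.ToLower(uuid.New().String()[:4]))
	newImage, err := c.Commit(ctx, targetImage)
	if err != nil {
		log.Fatalf("failed to commit container: %s", err)
	}

	// Reuse the committed image as the base for a new container.
	c2, err := ollama.RunContainer(ctx, testcontainers.WithImage(newImage))
	if err != nil {
		log.Fatalf("failed to start container from %s: %s", newImage, err)
	}
	defer func() { _ = c2.Terminate(ctx) }()
}
```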

## Examples

### Loading Models

It's possible to initialise the Ollama container with a specific model passed as parameter. The supported models are described in the Ollama project: [https://github.com/ollama/ollama?tab=readme-ov-file](https://github.com/ollama/ollama?tab=readme-ov-file) and [https://ollama.com/library](https://ollama.com/library).

!!!warning
    When you use one of those models, the container has to pull and load it, so it can take noticeably longer to start.

The following examples use the `llama2` model to connect to the Ollama container using HTTP and Langchain.
<!--codeinclude-->
[Using HTTP](../../modules/ollama/examples_test.go) inside_block:withHTTPModelLlama2
[Using Langchaingo](../../modules/ollama/examples_test.go) inside_block:withLangchainModelLlama2
<!--/codeinclude-->
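
Since the module no longer wires model management into a container option, tests pull and run models themselves through the container's `Exec` method, as the updated `examples_test.go` below shows. A condensed, self-contained sketch of that flow, assuming the `llama2` model and the `0.1.25` tag used above:

```go
package main

import (
	"context"
	"log"

	"github.com/testcontainers/testcontainers-go"
	tcollama "github.com/testcontainers/testcontainers-go/modules/ollama"
)

func main() {
	ctx := context.Background()

	// Start the Ollama container without any model baked in.
	ollamaContainer, err := tcollama.RunContainer(ctx, testcontainers.WithImage("ollama/ollama:0.1.25"))
	if err != nil {
		log.Fatalf("failed to start container: %s", err)
	}
	defer func() {
		if err := ollamaContainer.Terminate(ctx); err != nil {
			log.Fatalf("failed to terminate container: %s", err)
		}
	}()

	model := "llama2"

	// Pull the model inside the container, then load it, mirroring the
	// `ollama pull` / `ollama run` steps from the examples.
	if _, _, err := ollamaContainer.Exec(ctx, []string{"ollama", "pull", model}); err != nil {
		log.Fatalf("failed to pull model %s: %s", model, err)
	}
	if _, _, err := ollamaContainer.Exec(ctx, []string{"ollama", "run", model}); err != nil {
		log.Fatalf("failed to run model %s: %s", model, err)
	}

	// The HTTP endpoint of the Ollama API, e.g. for /api/generate or /api/tags.
	connectionStr, err := ollamaContainer.ConnectionString(ctx)
	if err != nil {
		log.Fatalf("failed to get connection string: %s", err)
	}
	log.Println("Ollama is listening on", connectionStr)
}
```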
28 changes: 25 additions & 3 deletions modules/ollama/examples_test.go
@@ -49,7 +49,6 @@ func ExampleRunContainer_withModel_llama2_http() {
ollamaContainer, err := tcollama.RunContainer(
ctx,
testcontainers.WithImage("ollama/ollama:0.1.25"),
tcollama.WithModel("llama2"),
)
if err != nil {
log.Fatalf("failed to start container: %s", err)
@@ -60,6 +59,18 @@ func ExampleRunContainer_withModel_llama2_http() {
}
}()

model := "llama2"

_, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "pull", model})
if err != nil {
log.Fatalf("failed to pull model %s: %s", model, err)
}

_, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "run", model})
if err != nil {
log.Fatalf("failed to run model %s: %s", model, err)
}

connectionStr, err := ollamaContainer.ConnectionString(ctx)
if err != nil {
log.Fatalf("failed to get connection string: %s", err) // nolint:gocritic
@@ -96,7 +107,6 @@ func ExampleRunContainer_withModel_llama2_langchain() {
ollamaContainer, err := tcollama.RunContainer(
ctx,
testcontainers.WithImage("ollama/ollama:0.1.25"),
tcollama.WithModel("llama2"),
)
if err != nil {
log.Fatalf("failed to start container: %s", err)
@@ -107,14 +117,26 @@ func ExampleRunContainer_withModel_llama2_langchain() {
}
}()

model := "llama2"

_, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "pull", model})
if err != nil {
log.Fatalf("failed to pull model %s: %s", model, err)
}

_, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "run", model})
if err != nil {
log.Fatalf("failed to run model %s: %s", model, err)
}

connectionStr, err := ollamaContainer.ConnectionString(ctx)
if err != nil {
log.Fatalf("failed to get connection string: %s", err) // nolint:gocritic
}

var llm *langchainollama.LLM
if llm, err = langchainollama.New(
langchainollama.WithModel("llama2"),
langchainollama.WithModel(model),
langchainollama.WithServerURL(connectionStr),
); err != nil {
log.Fatalf("failed to create langchain ollama: %s", err) // nolint:gocritic
155 changes: 83 additions & 72 deletions modules/ollama/ollama_test.go
@@ -4,13 +4,15 @@ import (
"context"
"fmt"
"io"
"log"
"net/http"
"strings"
"testing"

"github.com/google/uuid"

"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/exec"
"github.com/testcontainers/testcontainers-go/modules/ollama"
)

@@ -48,110 +50,119 @@ func TestOllama(t *testing.T) {
t.Fatalf("expected status code 200, got %d", resp.StatusCode)
}
})
}

func TestRunContainer_withModel_error(t *testing.T) {
ctx := context.Background()
t.Run("Pull and Run Model", func(t *testing.T) {
model := "all-minilm"

ollamaContainer, err := ollama.RunContainer(
ctx,
testcontainers.WithImage("ollama/ollama:0.1.25"),
ollama.WithModel("non-existent"),
)
if ollamaContainer != nil {
t.Fatal("container should not start")
}
if err == nil {
t.Fatal("expected error, got nil")
}

expectedErrorMessages := []string{
"failed to run non-existent model",
"Error: pull model manifest: file does not exist",
}

for _, expected := range expectedErrorMessages {
if !strings.Contains(err.Error(), expected) {
t.Fatalf("expected error to contain %q, got %s", expected, err)
_, _, err = container.Exec(context.Background(), []string{"ollama", "pull", model})
if err != nil {
log.Fatalf("failed to pull model %s: %s", model, err)
}
}
}

func TestDownloadModelAndCommitToImage(t *testing.T) {
ollamaContainer, err := ollama.RunContainer(
context.Background(),
testcontainers.WithImage("ollama/ollama:0.1.25"),
ollama.WithModel("all-minilm"),
)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
if err := ollamaContainer.Terminate(context.Background()); err != nil {
t.Fatalf("failed to terminate container: %s", err)
_, _, err = container.Exec(context.Background(), []string{"ollama", "run", model})
if err != nil {
log.Fatalf("failed to run model %s: %s", model, err)
}

assertLoadedModel(t, container)
})

assertLoadedModel := func(t *testing.T, c *ollama.OllamaContainer) {
url, err := c.ConnectionString(context.Background())
if err != nil {
t.Fatal(err)
}
t.Run("Commit to image including model", func(t *testing.T) {
// commitOllamaContainer {

httpCli := &http.Client{}
// Defining the target image name based on the default image and a random string.
// Users can change the way this is generated, but it should be unique.
targetImage := fmt.Sprintf("%s-%s", ollama.DefaultOllamaImage, strings.ToLower(uuid.New().String()[:4]))

resp, err := httpCli.Get(url + "/api/tags")
newImage, err := container.Commit(context.Background(), targetImage)
// }
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()

if resp.StatusCode != http.StatusOK {
t.Fatalf("expected status code 200, got %d", resp.StatusCode)
if newImage == "" {
t.Fatal("new image should not be empty")
}

// read JSON response

bs, err := io.ReadAll(resp.Body)
newOllamaContainer, err := ollama.RunContainer(
context.Background(),
testcontainers.WithImage(newImage),
)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
if err := newOllamaContainer.Terminate(context.Background()); err != nil {
t.Fatalf("failed to terminate container: %s", err)
}
})

if !strings.Contains(string(bs), "all-minilm") {
t.Fatalf("expected response to contain all-minilm, got %s", string(bs))
}
assertLoadedModel(t, newOllamaContainer)
})
}

// assertLoadedModel checks if the model is loaded in the container.
// For that, it checks if the response of the /api/tags endpoint
// contains the model name.
func assertLoadedModel(t *testing.T, c *ollama.OllamaContainer) {
url, err := c.ConnectionString(context.Background())
if err != nil {
t.Fatal(err)
}

assertLoadedModel(t, ollamaContainer)
httpCli := &http.Client{}

// commitOllamaContainer {
resp, err := httpCli.Get(url + "/api/tags")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()

// Defining the target image name based on the default image and a random string.
// Users can change the way this is generated, but it should be unique.
targetImage := fmt.Sprintf("%s-%s", ollama.DefaultOllamaImage, strings.ToLower(uuid.New().String()[:4]))
if resp.StatusCode != http.StatusOK {
t.Fatalf("expected status code 200, got %d", resp.StatusCode)
}

newImage, err := ollamaContainer.Commit(context.Background(), targetImage)
// }
bs, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}

if newImage == "" {
t.Fatal("new image should not be empty")
if !strings.Contains(string(bs), "all-minilm") {
t.Fatalf("expected response to contain all-minilm, got %s", string(bs))
}
}

func TestRunContainer_withModel_error(t *testing.T) {
ctx := context.Background()

newOllamaContainer, err := ollama.RunContainer(
context.Background(),
testcontainers.WithImage(newImage),
ollamaContainer, err := ollama.RunContainer(
ctx,
testcontainers.WithImage("ollama/ollama:0.1.25"),
)
if err != nil {
t.Fatal(err)
t.Fatalf("expected error to be nil, got %s", err)
}

model := "non-existent"

_, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "pull", model})
if err != nil {
log.Fatalf("expected nil error, got %s", err)
}

// we need to parse the response here to check if the error message is correct
_, r, err := ollamaContainer.Exec(ctx, []string{"ollama", "run", model}, exec.Multiplexed())
if err != nil {
log.Fatalf("expected nil error, got %s", err)
}
t.Cleanup(func() {
if err := newOllamaContainer.Terminate(context.Background()); err != nil {
t.Fatalf("failed to terminate container: %s", err)
}
})

assertLoadedModel(t, newOllamaContainer)
bs, err := io.ReadAll(r)
if err != nil {
t.Fatalf("failed to run %s model: %s", model, err)
}

stdOutput := string(bs)
if !strings.Contains(stdOutput, "Error: pull model manifest: file does not exist") {
t.Fatalf("expected output to contain %q, got %s", "Error: pull model manifest: file does not exist", stdOutput)
}
}
43 changes: 0 additions & 43 deletions modules/ollama/options.go
@@ -2,14 +2,10 @@ package ollama

import (
"context"
"fmt"
"io"
"strings"

"github.com/docker/docker/api/types/container"

"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/exec"
)

var noopCustomizeRequestOption = func(req *testcontainers.GenericContainerRequest) {}
@@ -41,42 +37,3 @@ func withGpu() testcontainers.CustomizeRequestOption {
}
})
}

// WithModel will run the given model, without any prompt.
// If Ollama is not able to run the given model, it will fail to initialise.
func WithModel(model string) testcontainers.CustomizeRequestOption {
pullCmds := []string{"ollama", "pull", model}
runCmds := []string{"ollama", "run", model}

return func(req *testcontainers.GenericContainerRequest) {
modelLifecycleHook := testcontainers.ContainerLifecycleHooks{
PostReadies: []testcontainers.ContainerHook{
func(ctx context.Context, c testcontainers.Container) error {
_, _, err := c.Exec(ctx, pullCmds, exec.Multiplexed())
if err != nil {
return fmt.Errorf("failed to pull model %s: %w", model, err)
}

_, r, err := c.Exec(ctx, runCmds, exec.Multiplexed())
if err != nil {
return fmt.Errorf("failed to run model %s: %w", model, err)
}

bs, err := io.ReadAll(r)
if err != nil {
return fmt.Errorf("failed to run %s model: %w", model, err)
}

stdOutput := string(bs)
if strings.Contains(stdOutput, "Error: pull model manifest: file does not exist") {
return fmt.Errorf("failed to run %s model [%v]: %s", model, runCmds, stdOutput)
}

return nil
},
},
}

req.LifecycleHooks = append(req.LifecycleHooks, modelLifecycleHook)
}
}
