// Forked from GoogleCloudPlatform/golang-samples — multimodalall.go (90 lines, 77 loc, 3.04 KB).
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// multimodalall shows an example of understanding a video with audio as multimodal input
package multimodalall
// [START generativeaionvertexai_gemini_all_modalities]
import (
"context"
"errors"
"fmt"
"io"
"mime"
"path/filepath"
"cloud.google.com/go/vertexai/genai"
)
// multimodalPrompt is a sample prompt type consisting of one video, one image, and a text question.
// multimodalPrompt bundles the inputs for a single multimodal request:
// one video, one image, and a text question about both.
type multimodalPrompt struct {
	// video is the Cloud Storage ("gs://") URI of the video file.
	video string
	// image is the Cloud Storage ("gs://") URI of the image file.
	image string
	// question is the text question asked to the model about the media.
	question string
}
// generateContentFromVideoWithAudio shows how to send a multi-modal prompt to a model, writing the response to
// the provided io.Writer.
// generateContentFromVideoWithAudio sends a multimodal prompt (a video with
// audio, an image, and a text question) to the named Gemini model on Vertex AI
// and writes the model's response to the provided io.Writer.
//
// prompt supplies the Cloud Storage URIs and the question; projectID,
// location, and modelName identify the Vertex AI resources, for example:
//
//	prompt := multimodalPrompt{
//		video:    "gs://cloud-samples-data/generative-ai/video/behind_the_scenes_pixel.mp4",
//		image:    "gs://cloud-samples-data/generative-ai/image/a-man-and-a-dog.png",
//		question: "When is the moment in the image happening in the video? Provide a timestamp.",
//	}
//	location := "us-central1"
//	modelName := "gemini-1.5-pro-preview-0409"
func generateContentFromVideoWithAudio(w io.Writer, prompt multimodalPrompt, projectID, location, modelName string) error {
	ctx := context.Background()

	client, err := genai.NewClient(ctx, projectID, location)
	if err != nil {
		return fmt.Errorf("unable to create client: %w", err)
	}
	defer client.Close()

	model := client.GenerativeModel(modelName)

	// Each part's MIME type is inferred from the extension of its GCS URI
	// (e.g. ".mp4" -> "video/mp4", ".png" -> "image/png").
	vidPart := genai.FileData{
		MIMEType: mime.TypeByExtension(filepath.Ext(prompt.video)),
		FileURI:  prompt.video,
	}
	imgPart := genai.FileData{
		MIMEType: mime.TypeByExtension(filepath.Ext(prompt.image)),
		FileURI:  prompt.image,
	}

	res, err := model.GenerateContent(ctx, vidPart, imgPart, genai.Text(prompt.question))
	if err != nil {
		return fmt.Errorf("unable to generate contents: %w", err)
	}

	// Guard against an empty or filtered response. Candidates may be empty,
	// and a candidate's Content is a pointer that can be nil (e.g. when the
	// response is blocked by safety filters); the original code dereferenced
	// Content.Parts without the nil check and could panic here.
	if len(res.Candidates) == 0 ||
		res.Candidates[0].Content == nil ||
		len(res.Candidates[0].Content.Parts) == 0 {
		return errors.New("empty response from model")
	}

	fmt.Fprintf(w, "generated response: %s\n", res.Candidates[0].Content.Parts[0])
	return nil
}
// [END generativeaionvertexai_gemini_all_modalities]