Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feat: add chat-gpt step and its example #156

Merged
merged 1 commit into from
Apr 10, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
132 changes: 132 additions & 0 deletions charts/vela-workflow/templates/definitions/chat-gpt.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
# Code generated by KubeVela templates. DO NOT EDIT. Please edit the original cue file.
# Definition source cue file: vela-templates/definitions/internal/chat-gpt.cue
apiVersion: core.oam.dev/v1beta1
kind: WorkflowStepDefinition
metadata:
  annotations:
    # NOTE(review): "Intergration" is misspelled, but the same spelling appears in
    # other KubeVela category annotations; kept as-is so this definition groups
    # with its siblings in the UI — confirm upstream before correcting.
    custom.definition.oam.dev/category: External Intergration
    definition.oam.dev/alias: ""
    definition.oam.dev/description: Send request to chat-gpt
    definition.oam.dev/example-url: https://raw.githubusercontent.com/kubevela/workflow/main/examples/workflow-run/chat-gpt.yaml
  name: chat-gpt
  namespace: {{ include "systemDefinitionNamespace" . }}
spec:
  schematic:
    cue:
      # Step logic is an embedded CUE template; "//" comments below are CUE
      # comments, and "+usage" lines feed the generated parameter docs.
      template: |
        import (
          "vela/op"
          "encoding/json"
          "encoding/base64"
        )

        token: op.#Steps & {
          if parameter.token.value != _|_ {
            value: parameter.token.value
          }
          if parameter.token.secretRef != _|_ {
            read: op.#Read & {
              value: {
                apiVersion: "v1"
                kind: "Secret"
                metadata: {
                  name: parameter.token.secretRef.name
                  namespace: context.namespace
                }
              }
            }

            stringValue: op.#ConvertString & {bt: base64.Decode(null, read.value.data[parameter.token.secretRef.key])}
            value: stringValue.str
          }
        }
        http: op.#HTTPDo & {
          method: "POST"
          url: "https://api.openai.com/v1/chat/completions"
          request: {
            timeout: parameter.timeout
            body: json.Marshal({
              model: parameter.model
              messages: [{
                if parameter.prompt.type == "custom" {
                  content: parameter.prompt.content
                }
                if parameter.prompt.type == "diagnose" {
                  content: """
                    You are a professional kubernetes administrator.
                    Carefully read the provided information, being certain to spell out the diagnosis & reasoning, and don't skip any steps.
                    Answer in \(parameter.prompt.lang).
                    ---
                    \(json.Marshal(parameter.prompt.content))
                    ---
                    What is wrong with this object and how to fix it?
                    """
                }
                if parameter.prompt.type == "audit" {
                  content: """
                    You are a professional kubernetes administrator.
                    You inspect the object and find out the security misconfigurations and give advice.
                    Write down the possible problems in bullet points, using the imperative tense.
                    Remember to write only the most important points and do not write more than a few bullet points.
                    Answer in \(parameter.prompt.lang).
                    ---
                    \(json.Marshal(parameter.prompt.content))
                    ---
                    What is the secure problem with this object and how to fix it?
                    """
                }
                if parameter.prompt.type == "quality-gate" {
                  content: """
                    You are a professional kubernetes administrator.
                    You inspect the object and find out the security misconfigurations and rate the object. The max score is 100.
                    Answer with score only.
                    ---
                    \(json.Marshal(parameter.prompt.content))
                    ---
                    What is the score of this object?
                    """
                }
                role: "user"
              }]
            })
            header: {
              "Content-Type": "application/json"
              Authorization: "Bearer \(token.value)"
            }
          }
        }
        response: json.Unmarshal(http.response.body)
        fail: op.#Steps & {
          if http.response.statusCode >= 400 {
            requestFail: op.#Fail & {
              message: "\(http.response.statusCode): failed to request: \(response.error.message)"
            }
          }
        }
        result: response.choices[0].message.content
        log: op.#Log & {
          data: result
        }
        parameter: {
          token: close({
            // +usage=the token value
            value: string
          }) | close({
            secretRef: {
              // +usage=name is the name of the secret
              name: string
              // +usage=key is the token key in the secret
              key: string
            }
          })
          // +usage=the model name
          model: *"gpt-3.5-turbo" | string
          // +usage=the prompt to use
          prompt: {
            type: *"custom" | "diagnose" | "audit" | "quality-gate"
            lang: *"English" | "Chinese"
            content: string | {...}
          }
          timeout: *"30s" | string
        }

49 changes: 49 additions & 0 deletions examples/workflow-run/chat-gpt.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
apiVersion: core.oam.dev/v1alpha1
kind: WorkflowRun
metadata:
  name: chat-gpt
  namespace: default
spec:
  workflowSpec:
    steps:
      # apply a deployment with an invalid image; this step will fail because of timeout.
      # the resource will be passed to the chat-gpt step to analyze
      - name: apply
        type: apply-deployment
        timeout: 3s
        outputs:
          - name: resource
            valueFrom: output.value
        properties:
          image: invalid

      # if the apply step failed, send the resource to chat-gpt to diagnose
      - name: chat-diagnose
        if: status.apply.failed
        type: chat-gpt
        inputs:
          - from: resource
            parameterKey: prompt.content
        properties:
          token:
            # specify your token inline
            value: <your token>
          prompt:
            type: diagnose

      # if the apply step succeeded, send the resource to chat-gpt to audit
      - name: chat-audit
        if: status.apply.succeeded
        type: chat-gpt
        inputs:
          - from: resource
            parameterKey: prompt.content
        properties:
          token:
            # or read your token from a secret
            secretRef:
              name: chat-gpt-token-secret
              key: token
          prompt:
            type: audit
            # answer in Chinese instead of the default English
            lang: Chinese