// Code generated by the Pulumi SDK Generator DO NOT EDIT.
// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! ***
package v2

import (
	"context"
	"errors"
	"reflect"

	"github.com/pulumi/pulumi-google-native/sdk/go/google/internal"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Creates an evaluation of a conversation model.
// Note - this resource's API doesn't support deletion. When deleted, the resource will persist
// on Google Cloud even though it will be deleted from Pulumi state.
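//
// Example usage (a minimal sketch; the `dialogflow` import alias, the resource name, and all field
// values below are hypothetical placeholders):
//
//	evaluation, err := dialogflow.NewEvaluation(ctx, "example-evaluation", &dialogflow.EvaluationArgs{
//		ConversationModelId: pulumi.String("my-conversation-model-id"),
//		DisplayName:         pulumi.String("smart-reply-eval"),
//		Location:            pulumi.String("us-central1"),
//	})
//	if err != nil {
//		return err
//	}
//	ctx.Export("evaluationName", evaluation.Name)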
type Evaluation struct {
pulumi.CustomResourceState
ConversationModelId pulumi.StringOutput `pulumi:"conversationModelId"`
// Creation time of this model.
CreateTime pulumi.StringOutput `pulumi:"createTime"`
// Optional. The display name of the model evaluation. At most 64 bytes long.
DisplayName pulumi.StringOutput `pulumi:"displayName"`
// Optional. The configuration of the evaluation task.
EvaluationConfig GoogleCloudDialogflowV2EvaluationConfigResponseOutput `pulumi:"evaluationConfig"`
Location pulumi.StringOutput `pulumi:"location"`
// The resource name of the evaluation. Format: `projects//conversationModels//evaluations/`
Name pulumi.StringOutput `pulumi:"name"`
Project pulumi.StringOutput `pulumi:"project"`
// Human evaluation template in CSV format. It takes the real-world conversations provided through the input dataset and generates example suggestions for the customer to verify the quality of the model. For Smart Reply, the generated CSV file contains the columns Context, (Suggestions,Q1,Q2)*3, and Actual reply. Context contains at most the 10 latest messages in the conversation prior to the current suggestion. Q1: "Would you send it as the next message of agent?" is evaluated based on whether the suggestion is appropriate to be sent by the agent in the current context. Q2: "Does the suggestion move the conversation closer to resolution?" is evaluated based on whether the suggestion provides a solution, answers the customer's question, or collects information from the customer to resolve the customer's issue. The Actual reply column contains the actual agent reply sent in the context.
RawHumanEvalTemplateCsv pulumi.StringOutput `pulumi:"rawHumanEvalTemplateCsv"`
// Only available when model is for smart reply.
SmartReplyMetrics GoogleCloudDialogflowV2SmartReplyMetricsResponseOutput `pulumi:"smartReplyMetrics"`
}
// NewEvaluation registers a new resource with the given unique name, arguments, and options.
func NewEvaluation(ctx *pulumi.Context,
name string, args *EvaluationArgs, opts ...pulumi.ResourceOption) (*Evaluation, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ConversationModelId == nil {
return nil, errors.New("invalid value for required argument 'ConversationModelId'")
}
replaceOnChanges := pulumi.ReplaceOnChanges([]string{
"conversationModelId",
"location",
"project",
})
opts = append(opts, replaceOnChanges)
opts = internal.PkgResourceDefaultOpts(opts)
var resource Evaluation
err := ctx.RegisterResource("google-native:dialogflow/v2:Evaluation", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetEvaluation gets an existing Evaluation resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
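//
// A minimal sketch of such a lookup (the logical name and the fully qualified resource ID below are
// hypothetical placeholders; the `dialogflow` import alias is assumed to refer to this package):
//
//	existing, err := dialogflow.GetEvaluation(ctx, "existing-evaluation",
//		pulumi.ID("projects/my-project/locations/us-central1/conversationModels/my-model/evaluations/my-eval"),
//		nil)
//	if err != nil {
//		return err
//	}
//	_ = existing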
func GetEvaluation(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *EvaluationState, opts ...pulumi.ResourceOption) (*Evaluation, error) {
var resource Evaluation
err := ctx.ReadResource("google-native:dialogflow/v2:Evaluation", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Evaluation resources.
type evaluationState struct {
}
type EvaluationState struct {
}
func (EvaluationState) ElementType() reflect.Type {
return reflect.TypeOf((*evaluationState)(nil)).Elem()
}
type evaluationArgs struct {
ConversationModelId string `pulumi:"conversationModelId"`
// Optional. The display name of the model evaluation. At most 64 bytes long.
DisplayName *string `pulumi:"displayName"`
// Optional. The configuration of the evaluation task.
EvaluationConfig *GoogleCloudDialogflowV2EvaluationConfig `pulumi:"evaluationConfig"`
Location *string `pulumi:"location"`
// The resource name of the evaluation. Format: `projects//conversationModels//evaluations/`
Name *string `pulumi:"name"`
Project *string `pulumi:"project"`
}
// The set of arguments for constructing an Evaluation resource.
type EvaluationArgs struct {
ConversationModelId pulumi.StringInput
// Optional. The display name of the model evaluation. At most 64 bytes long.
DisplayName pulumi.StringPtrInput
// Optional. The configuration of the evaluation task.
EvaluationConfig GoogleCloudDialogflowV2EvaluationConfigPtrInput
Location pulumi.StringPtrInput
// The resource name of the evaluation. Format: `projects//conversationModels//evaluations/`
Name pulumi.StringPtrInput
Project pulumi.StringPtrInput
}
func (EvaluationArgs) ElementType() reflect.Type {
return reflect.TypeOf((*evaluationArgs)(nil)).Elem()
}
type EvaluationInput interface {
pulumi.Input
ToEvaluationOutput() EvaluationOutput
ToEvaluationOutputWithContext(ctx context.Context) EvaluationOutput
}
func (*Evaluation) ElementType() reflect.Type {
return reflect.TypeOf((**Evaluation)(nil)).Elem()
}
func (i *Evaluation) ToEvaluationOutput() EvaluationOutput {
return i.ToEvaluationOutputWithContext(context.Background())
}
func (i *Evaluation) ToEvaluationOutputWithContext(ctx context.Context) EvaluationOutput {
return pulumi.ToOutputWithContext(ctx, i).(EvaluationOutput)
}
type EvaluationOutput struct{ *pulumi.OutputState }
func (EvaluationOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Evaluation)(nil)).Elem()
}
func (o EvaluationOutput) ToEvaluationOutput() EvaluationOutput {
return o
}
func (o EvaluationOutput) ToEvaluationOutputWithContext(ctx context.Context) EvaluationOutput {
return o
}
func (o EvaluationOutput) ConversationModelId() pulumi.StringOutput {
return o.ApplyT(func(v *Evaluation) pulumi.StringOutput { return v.ConversationModelId }).(pulumi.StringOutput)
}
// Creation time of this model.
func (o EvaluationOutput) CreateTime() pulumi.StringOutput {
return o.ApplyT(func(v *Evaluation) pulumi.StringOutput { return v.CreateTime }).(pulumi.StringOutput)
}
// Optional. The display name of the model evaluation. At most 64 bytes long.
func (o EvaluationOutput) DisplayName() pulumi.StringOutput {
return o.ApplyT(func(v *Evaluation) pulumi.StringOutput { return v.DisplayName }).(pulumi.StringOutput)
}
// Optional. The configuration of the evaluation task.
func (o EvaluationOutput) EvaluationConfig() GoogleCloudDialogflowV2EvaluationConfigResponseOutput {
return o.ApplyT(func(v *Evaluation) GoogleCloudDialogflowV2EvaluationConfigResponseOutput { return v.EvaluationConfig }).(GoogleCloudDialogflowV2EvaluationConfigResponseOutput)
}
func (o EvaluationOutput) Location() pulumi.StringOutput {
return o.ApplyT(func(v *Evaluation) pulumi.StringOutput { return v.Location }).(pulumi.StringOutput)
}
// The resource name of the evaluation. Format: `projects//conversationModels//evaluations/`
func (o EvaluationOutput) Name() pulumi.StringOutput {
return o.ApplyT(func(v *Evaluation) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)
}
func (o EvaluationOutput) Project() pulumi.StringOutput {
return o.ApplyT(func(v *Evaluation) pulumi.StringOutput { return v.Project }).(pulumi.StringOutput)
}
// Human evaluation template in CSV format. It takes the real-world conversations provided through the input dataset and generates example suggestions for the customer to verify the quality of the model. For Smart Reply, the generated CSV file contains the columns Context, (Suggestions,Q1,Q2)*3, and Actual reply. Context contains at most the 10 latest messages in the conversation prior to the current suggestion. Q1: "Would you send it as the next message of agent?" is evaluated based on whether the suggestion is appropriate to be sent by the agent in the current context. Q2: "Does the suggestion move the conversation closer to resolution?" is evaluated based on whether the suggestion provides a solution, answers the customer's question, or collects information from the customer to resolve the customer's issue. The Actual reply column contains the actual agent reply sent in the context.
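//
// A sketch of making the template available outside the program, assuming `evaluation` is an *Evaluation
// created with NewEvaluation (the export name is an arbitrary placeholder):
//
//	ctx.Export("humanEvalTemplateCsv", evaluation.RawHumanEvalTemplateCsv)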
func (o EvaluationOutput) RawHumanEvalTemplateCsv() pulumi.StringOutput {
return o.ApplyT(func(v *Evaluation) pulumi.StringOutput { return v.RawHumanEvalTemplateCsv }).(pulumi.StringOutput)
}
// Only available when model is for smart reply.
func (o EvaluationOutput) SmartReplyMetrics() GoogleCloudDialogflowV2SmartReplyMetricsResponseOutput {
return o.ApplyT(func(v *Evaluation) GoogleCloudDialogflowV2SmartReplyMetricsResponseOutput { return v.SmartReplyMetrics }).(GoogleCloudDialogflowV2SmartReplyMetricsResponseOutput)
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*EvaluationInput)(nil)).Elem(), &Evaluation{})
pulumi.RegisterOutputType(EvaluationOutput{})
}