// Code generated by protoc-gen-go. DO NOT EDIT.
// source: tensorflow_serving/apis/prediction_service.proto
package serving
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
// (The blank assignments keep the proto, fmt, and math imports referenced
// even though this file declares no message types of its own.)
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// init registers this file's gzipped file descriptor with the proto runtime
// under the canonical source path of the .proto it was generated from.
func init() {
	proto.RegisterFile("tensorflow_serving/apis/prediction_service.proto", fileDescriptor_6f2588d3ed9ea15a)
}
// fileDescriptor_6f2588d3ed9ea15a is the gzip-compressed FileDescriptorProto
// for prediction_service.proto. It is passed to proto.RegisterFile in init()
// and is otherwise opaque generated data — do not edit by hand.
var fileDescriptor_6f2588d3ed9ea15a = []byte{
	// 292 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x4b, 0xfb, 0x40,
	0x10, 0xc5, 0x29, 0x85, 0x6f, 0xbf, 0xec, 0x41, 0x74, 0x8f, 0x39, 0x2a, 0xf5, 0x67, 0x49, 0x45,
	0xff, 0x03, 0x7b, 0x10, 0x0f, 0x01, 0x89, 0x17, 0x6f, 0x61, 0x4d, 0x26, 0x61, 0x21, 0xd9, 0x8d,
	0x3b, 0x53, 0xc5, 0xff, 0xdc, 0x83, 0x07, 0xa9, 0x3b, 0x1b, 0x69, 0x4d, 0xda, 0x5e, 0x93, 0xcf,
	0x7b, 0x33, 0xef, 0xed, 0x88, 0x6b, 0x02, 0x83, 0xd6, 0x95, 0xb5, 0x7d, 0xcf, 0x10, 0xdc, 0x9b,
	0x36, 0xd5, 0x5c, 0xb5, 0x1a, 0xe7, 0xad, 0x83, 0x42, 0xe7, 0xa4, 0xad, 0xf1, 0xdf, 0x73, 0x88,
	0x5b, 0x67, 0xc9, 0x4a, 0xf9, 0xab, 0x88, 0x59, 0x11, 0xcd, 0x86, 0x5c, 0xf2, 0x5a, 0x21, 0xea,
	0x52, 0xe7, 0x6a, 0xe5, 0xe4, 0x1d, 0xa2, 0xc1, 0x99, 0x15, 0x50, 0xd6, 0xd8, 0x02, 0xea, 0xac,
	0x01, 0x52, 0x85, 0x22, 0xc5, 0x8a, 0xb3, 0x21, 0x85, 0x36, 0x25, 0x38, 0x30, 0x61, 0xb9, 0x68,
	0xba, 0x23, 0x0e, 0x63, 0xe7, 0x43, 0x98, 0x83, 0xca, 0x01, 0x62, 0xb7, 0xeb, 0xcd, 0xd7, 0x58,
	0x1c, 0x3d, 0x76, 0x55, 0x3c, 0xf9, 0x26, 0xa4, 0x12, 0xff, 0x17, 0x3e, 0xd9, 0x87, 0xbc, 0x88,
	0xff, 0x16, 0x12, 0x2f, 0xd6, 0x72, 0xa7, 0xf0, 0xba, 0x04, 0xa4, 0xe8, 0x72, 0x1f, 0x14, 0x5b,
	0x6b, 0x10, 0xe4, 0xb3, 0x98, 0xa4, 0x7e, 0x19, 0x39, 0xed, 0x93, 0xa5, 0xdd, 0xa6, 0xc1, 0xfd,
	0x74, 0x17, 0xc6, 0xce, 0xa9, 0x98, 0x70, 0x22, 0x79, 0xdc, 0x27, 0xe1, 0x9f, 0xc1, 0xf6, 0x64,
	0x2b, 0xc3, 0x9e, 0x95, 0x38, 0x48, 0x96, 0x35, 0xe9, 0x87, 0xf0, 0x1e, 0xfd, 0xb5, 0xac, 0x33,
	0x5b, 0x6b, 0xd9, 0x44, 0x79, 0x50, 0x23, 0x0e, 0xef, 0x81, 0x92, 0xd5, 0x91, 0x24, 0x7c, 0x23,
	0xf2, 0xaa, 0x4f, 0xbf, 0x49, 0x85, 0x61, 0xb3, 0xfd, 0x60, 0x3f, 0xee, 0x6e, 0xfc, 0x39, 0x1a,
	0xbd, 0xfc, 0xfb, 0x39, 0x85, 0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0x88, 0x87, 0xa4,
	0x2c, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
// (Keeps the context and grpc imports referenced by the stubs below.)
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// PredictionServiceClient is the client API for PredictionService service.
// All five RPCs are unary: each call sends one request message and receives
// one response message (the service descriptor below declares no streams).
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type PredictionServiceClient interface {
	// Classify.
	Classify(ctx context.Context, in *ClassificationRequest, opts ...grpc.CallOption) (*ClassificationResponse, error)
	// Regress.
	Regress(ctx context.Context, in *RegressionRequest, opts ...grpc.CallOption) (*RegressionResponse, error)
	// Predict -- provides access to loaded TensorFlow model.
	Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*PredictResponse, error)
	// MultiInference API for multi-headed models.
	MultiInference(ctx context.Context, in *MultiInferenceRequest, opts ...grpc.CallOption) (*MultiInferenceResponse, error)
	// GetModelMetadata - provides access to metadata for loaded models.
	GetModelMetadata(ctx context.Context, in *GetModelMetadataRequest, opts ...grpc.CallOption) (*GetModelMetadataResponse, error)
}
// predictionServiceClient is the concrete PredictionServiceClient; every
// method issues a unary Invoke over the wrapped connection.
type predictionServiceClient struct {
	cc *grpc.ClientConn
}
// NewPredictionServiceClient returns a PredictionServiceClient that issues
// its RPCs over the supplied connection cc.
func NewPredictionServiceClient(cc *grpc.ClientConn) PredictionServiceClient {
	client := &predictionServiceClient{cc: cc}
	return client
}
// Classify performs the unary Classify RPC, decoding the reply into a fresh
// ClassificationResponse.
func (c *predictionServiceClient) Classify(ctx context.Context, in *ClassificationRequest, opts ...grpc.CallOption) (*ClassificationResponse, error) {
	resp := new(ClassificationResponse)
	if err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/Classify", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// Regress performs the unary Regress RPC, decoding the reply into a fresh
// RegressionResponse.
func (c *predictionServiceClient) Regress(ctx context.Context, in *RegressionRequest, opts ...grpc.CallOption) (*RegressionResponse, error) {
	resp := new(RegressionResponse)
	if err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/Regress", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// Predict performs the unary Predict RPC, decoding the reply into a fresh
// PredictResponse.
func (c *predictionServiceClient) Predict(ctx context.Context, in *PredictRequest, opts ...grpc.CallOption) (*PredictResponse, error) {
	resp := new(PredictResponse)
	if err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/Predict", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// MultiInference performs the unary MultiInference RPC, decoding the reply
// into a fresh MultiInferenceResponse.
func (c *predictionServiceClient) MultiInference(ctx context.Context, in *MultiInferenceRequest, opts ...grpc.CallOption) (*MultiInferenceResponse, error) {
	resp := new(MultiInferenceResponse)
	if err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/MultiInference", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// GetModelMetadata performs the unary GetModelMetadata RPC, decoding the
// reply into a fresh GetModelMetadataResponse.
func (c *predictionServiceClient) GetModelMetadata(ctx context.Context, in *GetModelMetadataRequest, opts ...grpc.CallOption) (*GetModelMetadataResponse, error) {
	resp := new(GetModelMetadataResponse)
	if err := c.cc.Invoke(ctx, "/tensorflow.serving.PredictionService/GetModelMetadata", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// PredictionServiceServer is the server API for PredictionService service.
// Implementations should embed UnimplementedPredictionServiceServer for
// forward compatibility; each method mirrors one unary RPC of the client API.
type PredictionServiceServer interface {
	// Classify.
	Classify(context.Context, *ClassificationRequest) (*ClassificationResponse, error)
	// Regress.
	Regress(context.Context, *RegressionRequest) (*RegressionResponse, error)
	// Predict -- provides access to loaded TensorFlow model.
	Predict(context.Context, *PredictRequest) (*PredictResponse, error)
	// MultiInference API for multi-headed models.
	MultiInference(context.Context, *MultiInferenceRequest) (*MultiInferenceResponse, error)
	// GetModelMetadata - provides access to metadata for loaded models.
	GetModelMetadata(context.Context, *GetModelMetadataRequest) (*GetModelMetadataResponse, error)
}
// UnimplementedPredictionServiceServer can be embedded to have forward compatible implementations.
// Every method on it returns a codes.Unimplemented status, so embedders only
// override the RPCs they actually support.
type UnimplementedPredictionServiceServer struct {
}
// Classify always fails with codes.Unimplemented; embedders override it.
func (*UnimplementedPredictionServiceServer) Classify(ctx context.Context, req *ClassificationRequest) (*ClassificationResponse, error) {
	err := status.Errorf(codes.Unimplemented, "method Classify not implemented")
	return nil, err
}
// Regress always fails with codes.Unimplemented; embedders override it.
func (*UnimplementedPredictionServiceServer) Regress(ctx context.Context, req *RegressionRequest) (*RegressionResponse, error) {
	err := status.Errorf(codes.Unimplemented, "method Regress not implemented")
	return nil, err
}
// Predict always fails with codes.Unimplemented; embedders override it.
func (*UnimplementedPredictionServiceServer) Predict(ctx context.Context, req *PredictRequest) (*PredictResponse, error) {
	err := status.Errorf(codes.Unimplemented, "method Predict not implemented")
	return nil, err
}
// MultiInference always fails with codes.Unimplemented; embedders override it.
func (*UnimplementedPredictionServiceServer) MultiInference(ctx context.Context, req *MultiInferenceRequest) (*MultiInferenceResponse, error) {
	err := status.Errorf(codes.Unimplemented, "method MultiInference not implemented")
	return nil, err
}
// GetModelMetadata always fails with codes.Unimplemented; embedders override it.
func (*UnimplementedPredictionServiceServer) GetModelMetadata(ctx context.Context, req *GetModelMetadataRequest) (*GetModelMetadataResponse, error) {
	err := status.Errorf(codes.Unimplemented, "method GetModelMetadata not implemented")
	return nil, err
}
// RegisterPredictionServiceServer attaches srv to the gRPC server s under
// the PredictionService descriptor declared in this file.
func RegisterPredictionServiceServer(s *grpc.Server, srv PredictionServiceServer) {
	desc := &_PredictionService_serviceDesc
	s.RegisterService(desc, srv)
}
// _PredictionService_Classify_Handler decodes an incoming Classify request
// and dispatches it to srv, running the interceptor chain when one is set.
func _PredictionService_Classify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(ClassificationRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	// Fast path: no interceptor installed, call the implementation directly.
	if interceptor == nil {
		return srv.(PredictionServiceServer).Classify(ctx, req)
	}
	handler := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(PredictionServiceServer).Classify(ctx, r.(*ClassificationRequest))
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tensorflow.serving.PredictionService/Classify",
	}
	return interceptor(ctx, req, info, handler)
}
// _PredictionService_Regress_Handler decodes an incoming Regress request
// and dispatches it to srv, running the interceptor chain when one is set.
func _PredictionService_Regress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(RegressionRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	// Fast path: no interceptor installed, call the implementation directly.
	if interceptor == nil {
		return srv.(PredictionServiceServer).Regress(ctx, req)
	}
	handler := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(PredictionServiceServer).Regress(ctx, r.(*RegressionRequest))
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tensorflow.serving.PredictionService/Regress",
	}
	return interceptor(ctx, req, info, handler)
}
// _PredictionService_Predict_Handler decodes an incoming Predict request
// and dispatches it to srv, running the interceptor chain when one is set.
func _PredictionService_Predict_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(PredictRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	// Fast path: no interceptor installed, call the implementation directly.
	if interceptor == nil {
		return srv.(PredictionServiceServer).Predict(ctx, req)
	}
	handler := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(PredictionServiceServer).Predict(ctx, r.(*PredictRequest))
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tensorflow.serving.PredictionService/Predict",
	}
	return interceptor(ctx, req, info, handler)
}
// _PredictionService_MultiInference_Handler decodes an incoming MultiInference
// request and dispatches it to srv, running the interceptor chain when one is set.
func _PredictionService_MultiInference_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(MultiInferenceRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	// Fast path: no interceptor installed, call the implementation directly.
	if interceptor == nil {
		return srv.(PredictionServiceServer).MultiInference(ctx, req)
	}
	handler := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(PredictionServiceServer).MultiInference(ctx, r.(*MultiInferenceRequest))
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tensorflow.serving.PredictionService/MultiInference",
	}
	return interceptor(ctx, req, info, handler)
}
// _PredictionService_GetModelMetadata_Handler decodes an incoming
// GetModelMetadata request and dispatches it to srv, running the interceptor
// chain when one is set.
func _PredictionService_GetModelMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(GetModelMetadataRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	// Fast path: no interceptor installed, call the implementation directly.
	if interceptor == nil {
		return srv.(PredictionServiceServer).GetModelMetadata(ctx, req)
	}
	handler := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(PredictionServiceServer).GetModelMetadata(ctx, r.(*GetModelMetadataRequest))
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tensorflow.serving.PredictionService/GetModelMetadata",
	}
	return interceptor(ctx, req, info, handler)
}
// _PredictionService_serviceDesc describes PredictionService for
// grpc.Server registration: five unary methods, no streaming RPCs, and the
// .proto file this service was generated from.
var _PredictionService_serviceDesc = grpc.ServiceDesc{
	ServiceName: "tensorflow.serving.PredictionService",
	HandlerType: (*PredictionServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Classify",
			Handler:    _PredictionService_Classify_Handler,
		},
		{
			MethodName: "Regress",
			Handler:    _PredictionService_Regress_Handler,
		},
		{
			MethodName: "Predict",
			Handler:    _PredictionService_Predict_Handler,
		},
		{
			MethodName: "MultiInference",
			Handler:    _PredictionService_MultiInference_Handler,
		},
		{
			MethodName: "GetModelMetadata",
			Handler:    _PredictionService_GetModelMetadata_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "tensorflow_serving/apis/prediction_service.proto",
}