-
Notifications
You must be signed in to change notification settings - Fork 272
/
get_model.go
106 lines (97 loc) · 3.47 KB
/
get_model.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
package nlp_automl
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// GetModel invokes the nlp_automl.GetModel API synchronously
func (client *Client) GetModel(request *GetModelRequest) (response *GetModelResponse, err error) {
	// Allocate the typed response container first, then let the shared
	// RPC machinery perform the call and unmarshal into it.
	resp := CreateGetModelResponse()
	if callErr := client.DoAction(request, resp); callErr != nil {
		return resp, callErr
	}
	return resp, nil
}
// GetModelWithChan invokes the nlp_automl.GetModel API asynchronously.
// Exactly one value is delivered: the response on the first channel on
// success, or the error on the second channel on failure; both channels
// are closed afterwards.
func (client *Client) GetModelWithChan(request *GetModelRequest) (<-chan *GetModelResponse, <-chan error) {
	respCh := make(chan *GetModelResponse, 1)
	errCh := make(chan error, 1)
	scheduleErr := client.AddAsyncTask(func() {
		defer close(respCh)
		defer close(errCh)
		if resp, callErr := client.GetModel(request); callErr != nil {
			errCh <- callErr
		} else {
			respCh <- resp
		}
	})
	if scheduleErr != nil {
		// The task never ran, so close the channels here after reporting
		// the scheduling error (both channels are buffered, cap 1).
		errCh <- scheduleErr
		close(respCh)
		close(errCh)
	}
	return respCh, errCh
}
// GetModelWithCallback invokes the nlp_automl.GetModel API asynchronously
// and hands the outcome to the supplied callback. The returned channel
// receives 1 after the callback ran with the API result, or 0 when the
// task could not be scheduled (callback then receives the scheduling
// error); the channel is closed in either case.
func (client *Client) GetModelWithCallback(request *GetModelRequest, callback func(response *GetModelResponse, err error)) <-chan int {
	done := make(chan int, 1)
	scheduleErr := client.AddAsyncTask(func() {
		defer close(done)
		resp, callErr := client.GetModel(request)
		callback(resp, callErr)
		done <- 1
	})
	if scheduleErr != nil {
		defer close(done)
		callback(nil, scheduleErr)
		done <- 0
	}
	return done
}
// GetModelRequest is the request struct for api GetModel.
// All fields are serialized into the POST body (position:"Body" tags).
type GetModelRequest struct {
	*requests.RpcRequest
	// Product names the product line the model belongs to — presumably
	// an nlp-automl product identifier; verify against the service docs.
	Product string `position:"Body" name:"Product"`
	// ModelId identifies the model to fetch.
	ModelId requests.Integer `position:"Body" name:"ModelId"`
	// ProjectId identifies the project that owns the model.
	ProjectId requests.Integer `position:"Body" name:"ProjectId"`
	// ModelVersion selects a specific version of the model.
	ModelVersion string `position:"Body" name:"ModelVersion"`
}
// GetModelResponse is the response struct for api GetModel.
type GetModelResponse struct {
	*responses.BaseResponse
	// RequestId is the server-assigned identifier for this API call.
	RequestId string `json:"RequestId" xml:"RequestId"`
	// Data carries the model payload as an untyped key/value map —
	// schema is defined by the service, not visible here.
	Data map[string]interface{} `json:"Data" xml:"Data"`
	// Code is the service-level status code.
	Code int `json:"Code" xml:"Code"`
	// Message is the service-level status/error message.
	Message string `json:"Message" xml:"Message"`
	// Success reports whether the service handled the request successfully.
	Success bool `json:"Success" xml:"Success"`
}
// CreateGetModelRequest creates a request to invoke GetModel API
func CreateGetModelRequest() (request *GetModelRequest) {
	req := &GetModelRequest{
		RpcRequest: &requests.RpcRequest{},
	}
	// Bind the product, API version, action name and endpoint metadata
	// exactly as the nlp-automl service expects.
	req.InitWithApiInfo("nlp-automl", "2019-11-11", "GetModel", "nlpautoml", "openAPI")
	req.Method = requests.POST
	return req
}
// CreateGetModelResponse creates a response to parse from GetModel response
func CreateGetModelResponse() (response *GetModelResponse) {
	// Only the embedded BaseResponse must be non-nil before the SDK
	// unmarshals the wire payload into this struct.
	return &GetModelResponse{
		BaseResponse: &responses.BaseResponse{},
	}
}