### YamlMime:PythonClass
uid: azure.synapse.spark.models.SparkBatchJob
name: SparkBatchJob
fullName: azure.synapse.spark.models.SparkBatchJob
module: azure.synapse.spark.models
inheritances:
- msrest.serialization.Model
summary: 'SparkBatchJob.

  All required parameters must be populated in order to send to Azure.'
constructor:
  syntax: 'SparkBatchJob(*, id: int, livy_info: SparkBatchJobState | None = None,
    name: str | None = None, workspace_name: str | None = None, spark_pool_name: str
    | None = None, submitter_name: str | None = None, submitter_id: str | None = None,
    artifact_id: str | None = None, job_type: str | SparkJobType | None = None, result:
    str | SparkBatchJobResultType | None = None, scheduler: SparkScheduler | None
    = None, plugin: SparkServicePlugin | None = None, errors: List[SparkServiceError]
    | None = None, tags: Dict[str, str] | None = None, app_id: str | None = None,
    app_info: Dict[str, str] | None = None, state: str | LivyStates | None = None,
    log_lines: List[str] | None = None, **kwargs)'
  parameters:
  - name: livy_info
    isRequired: true
    types:
    - <xref:azure.synapse.spark.models.SparkBatchJobState>
  - name: name
    description: The batch name.
    isRequired: true
    types:
    - <xref:str>
  - name: workspace_name
    description: The workspace name.
    isRequired: true
    types:
    - <xref:str>
  - name: spark_pool_name
    description: The Spark pool name.
    isRequired: true
    types:
    - <xref:str>
  - name: submitter_name
    description: The submitter name.
    isRequired: true
    types:
    - <xref:str>
  - name: submitter_id
    description: The submitter identifier.
    isRequired: true
    types:
    - <xref:str>
  - name: artifact_id
    description: The artifact identifier.
    isRequired: true
    types:
    - <xref:str>
  - name: job_type
    description: 'The job type. Possible values include: "SparkBatch", "SparkSession".'
    isRequired: true
    types:
    - <xref:str>
    - <xref:azure.synapse.spark.models.SparkJobType>
  - name: result
    description: 'The Spark batch job result. Possible values include: "Uncertain",
      "Succeeded", "Failed", "Cancelled".'
    isRequired: true
    types:
    - <xref:str>
    - <xref:azure.synapse.spark.models.SparkBatchJobResultType>
  - name: scheduler
    description: The scheduler information.
    isRequired: true
    types:
    - <xref:azure.synapse.spark.models.SparkScheduler>
  - name: plugin
    description: The plugin information.
    isRequired: true
    types:
    - <xref:azure.synapse.spark.models.SparkServicePlugin>
  - name: errors
    description: The error information.
    isRequired: true
    types:
    - <xref:list>[<xref:azure.synapse.spark.models.SparkServiceError>]
  - name: tags
    description: A set of tags. The tags.
    isRequired: true
    types:
    - <xref:dict>[<xref:str>, <xref:str>]
  - name: id
    description: Required. The session Id.
    isRequired: true
    types:
    - <xref:int>
  - name: app_id
    description: The application id of this session.
    isRequired: true
    types:
    - <xref:str>
  - name: app_info
    description: The detailed application info.
    isRequired: true
    types:
    - <xref:dict>[<xref:str>, <xref:str>]
  - name: state
    description: 'The batch state. Possible values include: "not_started", "starting",
      "idle", "busy", "shutting_down", "error", "dead", "killed", "success", "running",
      "recovering".'
    isRequired: true
    types:
    - <xref:str>
    - <xref:azure.synapse.spark.models.LivyStates>
  - name: log_lines
    description: The log lines.
    isRequired: true
    types:
    - <xref:list>[<xref:str>]
  keywordOnlyParameters:
  - name: id
    isRequired: true
  - name: livy_info
    isRequired: true
  - name: name
    isRequired: true
  - name: workspace_name
    isRequired: true
  - name: spark_pool_name
    isRequired: true
  - name: submitter_name
    isRequired: true
  - name: submitter_id
    isRequired: true
  - name: artifact_id
    isRequired: true
  - name: job_type
    isRequired: true
  - name: result
    isRequired: true
  - name: scheduler
    isRequired: true
  - name: plugin
    isRequired: true
  - name: errors
    isRequired: true
  - name: tags
    isRequired: true
  - name: app_id
    isRequired: true
  - name: app_info
    isRequired: true
  - name: state
    isRequired: true
  - name: log_lines
    isRequired: true
methods:
- uid: azure.synapse.spark.models.SparkBatchJob.as_dict
  name: as_dict
  summary: "Return a dict that can be JSONify using json.dump.\n\nAdvanced usage might\
    \ optionally use a callback as parameter:\n\nKey is the attribute name used in\
    \ Python. Attr_desc\nis a dict of metadata. Currently contains 'type' with the\n\
    msrest type and 'key' with the RestAPI encoded key.\nValue is the current value\
    \ in this object.\n\nThe string returned will be used to serialize the key.\n\
    If the return type is a list, this is considered hierarchical\nresult dict.\n\n\
    See the three examples in this file:\n\n* attribute_transformer \n\n* full_restapi_key_transformer\
    \ \n\n* last_restapi_key_transformer \n\nIf you want XML serialization, you can\
    \ pass the kwargs is_xml=True."
  signature: as_dict(keep_readonly=True, key_transformer=<function attribute_transformer>,
    **kwargs)
  parameters:
  - name: key_transformer
    description: A key transformer function.
    types:
    - <xref:function>
  - name: keep_readonly
    defaultValue: 'True'
  return:
    description: A dict JSON compatible object
    types:
    - <xref:dict>
- uid: azure.synapse.spark.models.SparkBatchJob.deserialize
  name: deserialize
  summary: Parse a str using the RestAPI syntax and return a model.
  signature: deserialize(data, content_type=None)
  parameters:
  - name: data
    description: A str using RestAPI structure. JSON by default.
    isRequired: true
    types:
    - <xref:str>
  - name: content_type
    description: JSON by default, set application/xml if XML.
    defaultValue: None
    types:
    - <xref:str>
  return:
    description: An instance of this model
  exceptions:
  - type: DeserializationError if something went wrong
- uid: azure.synapse.spark.models.SparkBatchJob.enable_additional_properties_sending
  name: enable_additional_properties_sending
  signature: enable_additional_properties_sending()
- uid: azure.synapse.spark.models.SparkBatchJob.from_dict
  name: from_dict
  summary: 'Parse a dict using given key extractor return a model.

    By default consider key extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
    and last_rest_key_case_insensitive_extractor)'
  signature: from_dict(data, key_extractors=None, content_type=None)
  parameters:
  - name: data
    description: A dict using RestAPI structure
    isRequired: true
    types:
    - <xref:dict>
  - name: content_type
    description: JSON by default, set application/xml if XML.
    defaultValue: None
    types:
    - <xref:str>
  - name: key_extractors
    defaultValue: None
  return:
    description: An instance of this model
  exceptions:
  - type: DeserializationError if something went wrong
- uid: azure.synapse.spark.models.SparkBatchJob.is_xml_model
  name: is_xml_model
  signature: is_xml_model()
- uid: azure.synapse.spark.models.SparkBatchJob.serialize
  name: serialize
  summary: 'Return the JSON that would be sent to azure from this model.

    This is an alias to *as_dict(full_restapi_key_transformer, keep_readonly=False)*.

    If you want XML serialization, you can pass the kwargs is_xml=True.'
  signature: serialize(keep_readonly=False, **kwargs)
  parameters:
  - name: keep_readonly
    description: If you want to serialize the readonly attributes
    defaultValue: 'False'
    types:
    - <xref:bool>
  return:
    description: A dict JSON compatible object
    types:
    - <xref:dict>
- uid: azure.synapse.spark.models.SparkBatchJob.validate
  name: validate
  summary: Validate this model recursively and return a list of ValidationError.
  signature: validate()
  return:
    description: A list of validation error
    types:
    - <xref:list>