package job
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"encoding/json"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/go-autorest/tracing"
"github.com/satori/go.uuid"
"net/http"
)
// The package's fully qualified name.
const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/datalake/analytics/2017-09-01-preview/job"
// CompileMode enumerates the values for compile mode.
type CompileMode string
const (
// Full ...
Full CompileMode = "Full"
// Semantic ...
Semantic CompileMode = "Semantic"
// SingleBox ...
SingleBox CompileMode = "SingleBox"
)
// PossibleCompileModeValues returns an array of possible values for the CompileMode const type.
func PossibleCompileModeValues() []CompileMode {
return []CompileMode{Full, Semantic, SingleBox}
}
// ResourceType enumerates the values for resource type.
type ResourceType string
const (
// JobManagerResource ...
JobManagerResource ResourceType = "JobManagerResource"
// JobManagerResourceInUserFolder ...
JobManagerResourceInUserFolder ResourceType = "JobManagerResourceInUserFolder"
// StatisticsResource ...
StatisticsResource ResourceType = "StatisticsResource"
// StatisticsResourceInUserFolder ...
StatisticsResourceInUserFolder ResourceType = "StatisticsResourceInUserFolder"
// VertexResource ...
VertexResource ResourceType = "VertexResource"
// VertexResourceInUserFolder ...
VertexResourceInUserFolder ResourceType = "VertexResourceInUserFolder"
)
// PossibleResourceTypeValues returns an array of possible values for the ResourceType const type.
func PossibleResourceTypeValues() []ResourceType {
return []ResourceType{JobManagerResource, JobManagerResourceInUserFolder, StatisticsResource, StatisticsResourceInUserFolder, VertexResource, VertexResourceInUserFolder}
}
// Result enumerates the values for result.
type Result string
const (
// Cancelled ...
Cancelled Result = "Cancelled"
// Failed ...
Failed Result = "Failed"
// None ...
None Result = "None"
// Succeeded ...
Succeeded Result = "Succeeded"
)
// PossibleResultValues returns an array of possible values for the Result const type.
func PossibleResultValues() []Result {
return []Result{Cancelled, Failed, None, Succeeded}
}
// SeverityTypes enumerates the values for severity types.
type SeverityTypes string
const (
// Deprecated ...
Deprecated SeverityTypes = "Deprecated"
// Error ...
Error SeverityTypes = "Error"
// Info ...
Info SeverityTypes = "Info"
// SevereWarning ...
SevereWarning SeverityTypes = "SevereWarning"
// UserWarning ...
UserWarning SeverityTypes = "UserWarning"
// Warning ...
Warning SeverityTypes = "Warning"
)
// PossibleSeverityTypesValues returns an array of possible values for the SeverityTypes const type.
func PossibleSeverityTypesValues() []SeverityTypes {
return []SeverityTypes{Deprecated, Error, Info, SevereWarning, UserWarning, Warning}
}
// State enumerates the values for state.
type State string
const (
// StateAccepted ...
StateAccepted State = "Accepted"
// StateCompiling ...
StateCompiling State = "Compiling"
// StateEnded ...
StateEnded State = "Ended"
// StateFinalizing ...
StateFinalizing State = "Finalizing"
// StateNew ...
StateNew State = "New"
// StatePaused ...
StatePaused State = "Paused"
// StateQueued ...
StateQueued State = "Queued"
// StateRunning ...
StateRunning State = "Running"
// StateScheduling ...
StateScheduling State = "Scheduling"
// StateStarting ...
StateStarting State = "Starting"
// StateWaitingForCapacity ...
StateWaitingForCapacity State = "WaitingForCapacity"
// StateYielded ...
StateYielded State = "Yielded"
)
// PossibleStateValues returns an array of possible values for the State const type.
func PossibleStateValues() []State {
return []State{StateAccepted, StateCompiling, StateEnded, StateFinalizing, StateNew, StatePaused, StateQueued, StateRunning, StateScheduling, StateStarting, StateWaitingForCapacity, StateYielded}
}
// Type enumerates the values for type.
type Type string
const (
// TypeHive ...
TypeHive Type = "Hive"
// TypeJobProperties ...
TypeJobProperties Type = "JobProperties"
// TypeScope ...
TypeScope Type = "Scope"
// TypeUSQL ...
TypeUSQL Type = "USql"
)
// PossibleTypeValues returns an array of possible values for the Type const type.
func PossibleTypeValues() []Type {
return []Type{TypeHive, TypeJobProperties, TypeScope, TypeUSQL}
}
// TypeBasicCreateJobProperties enumerates the values for type basic create job properties.
type TypeBasicCreateJobProperties string
const (
// TypeBasicCreateJobPropertiesTypeCreateJobProperties ...
TypeBasicCreateJobPropertiesTypeCreateJobProperties TypeBasicCreateJobProperties = "CreateJobProperties"
// TypeBasicCreateJobPropertiesTypeScope ...
TypeBasicCreateJobPropertiesTypeScope TypeBasicCreateJobProperties = "Scope"
// TypeBasicCreateJobPropertiesTypeUSQL ...
TypeBasicCreateJobPropertiesTypeUSQL TypeBasicCreateJobProperties = "USql"
)
// PossibleTypeBasicCreateJobPropertiesValues returns an array of possible values for the TypeBasicCreateJobProperties const type.
func PossibleTypeBasicCreateJobPropertiesValues() []TypeBasicCreateJobProperties {
return []TypeBasicCreateJobProperties{TypeBasicCreateJobPropertiesTypeCreateJobProperties, TypeBasicCreateJobPropertiesTypeScope, TypeBasicCreateJobPropertiesTypeUSQL}
}
// TypeEnum enumerates the values for type enum.
type TypeEnum string
const (
// Hive ...
Hive TypeEnum = "Hive"
// Scope ...
Scope TypeEnum = "Scope"
// USQL ...
USQL TypeEnum = "USql"
)
// PossibleTypeEnumValues returns an array of possible values for the TypeEnum const type.
func PossibleTypeEnumValues() []TypeEnum {
return []TypeEnum{Hive, Scope, USQL}
}
// BaseJobParameters data Lake Analytics Job Parameters base class for build and submit.
type BaseJobParameters struct {
// Type - The job type of the current job (Hive, USql, or Scope (for internal use only)). Possible values include: 'USQL', 'Hive', 'Scope'
Type TypeEnum `json:"type,omitempty"`
// Properties - The job specific properties.
Properties BasicCreateJobProperties `json:"properties,omitempty"`
}
// UnmarshalJSON is the custom unmarshaler for BaseJobParameters struct.
func (bjp *BaseJobParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "type":
if v != nil {
var typeVar TypeEnum
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
bjp.Type = typeVar
}
case "properties":
if v != nil {
properties, err := unmarshalBasicCreateJobProperties(*v)
if err != nil {
return err
}
bjp.Properties = properties
}
}
}
return nil
}
// BuildJobParameters the parameters used to build a new Data Lake Analytics job.
type BuildJobParameters struct {
// Name - The friendly name of the job to build.
Name *string `json:"name,omitempty"`
// Type - The job type of the current job (Hive, USql, or Scope (for internal use only)). Possible values include: 'USQL', 'Hive', 'Scope'
Type TypeEnum `json:"type,omitempty"`
// Properties - The job specific properties.
Properties BasicCreateJobProperties `json:"properties,omitempty"`
}
// UnmarshalJSON is the custom unmarshaler for BuildJobParameters struct.
func (bjp *BuildJobParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
bjp.Name = &name
}
case "type":
if v != nil {
var typeVar TypeEnum
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
bjp.Type = typeVar
}
case "properties":
if v != nil {
properties, err := unmarshalBasicCreateJobProperties(*v)
if err != nil {
return err
}
bjp.Properties = properties
}
}
}
return nil
}
// CancelFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type CancelFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *CancelFuture) Result(client Client) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "job.CancelFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("job.CancelFuture")
return
}
ar.Response = future.Response()
return
}
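// exampleAwaitCancel is an illustrative sketch, not part of the generated
// surface: it waits for a cancel long-running operation to finish and then
// collects the final response. It assumes the usual autorest layout in which
// the package's Client embeds an autorest.Client reachable as client.Client;
// adjust if the generated client is shaped differently.
func exampleAwaitCancel(ctx context.Context, client Client, future CancelFuture) (autorest.Response, error) {
	// Block until the service reports the operation complete, honoring the
	// client's polling settings.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return autorest.Response{}, err
	}
	// Result only succeeds once polling is done; otherwise it returns an
	// async-operation-incomplete error, as implemented above.
	return future.Result(client)
}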
// CreateJobParameters the parameters used to submit a new Data Lake Analytics job.
type CreateJobParameters struct {
// Name - The friendly name of the job to submit.
Name *string `json:"name,omitempty"`
// DegreeOfParallelism - The degree of parallelism to use for this job. At most one of degreeOfParallelism and degreeOfParallelismPercent should be specified. If none, a default value of 1 will be used for degreeOfParallelism.
DegreeOfParallelism *int32 `json:"degreeOfParallelism,omitempty"`
// DegreeOfParallelismPercent - The degree of parallelism, expressed as a percentage, to use for this job. At most one of degreeOfParallelism and degreeOfParallelismPercent should be specified. If none, a default value of 1 will be used for degreeOfParallelism.
DegreeOfParallelismPercent *float64 `json:"degreeOfParallelismPercent,omitempty"`
// Priority - The priority value to use for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0.
Priority *int32 `json:"priority,omitempty"`
// LogFilePatterns - The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. Example format: jobExecution*.log or *mylog*.txt
LogFilePatterns *[]string `json:"logFilePatterns,omitempty"`
// Related - The recurring job relationship information properties.
Related *RelationshipProperties `json:"related,omitempty"`
// Type - The job type of the current job (Hive, USql, or Scope (for internal use only)). Possible values include: 'USQL', 'Hive', 'Scope'
Type TypeEnum `json:"type,omitempty"`
// Properties - The job specific properties.
Properties BasicCreateJobProperties `json:"properties,omitempty"`
}
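// exampleSubmitParameters is an illustrative sketch, not part of the generated
// surface: it shows one way CreateJobParameters might be populated for a basic
// U-SQL submission. The job name, parallelism values, and script text below
// are hypothetical placeholders.
func exampleSubmitParameters() CreateJobParameters {
	script := "@rows = SELECT * FROM (VALUES(1)) AS v(n); OUTPUT @rows TO \"/output/example.csv\" USING Outputters.Csv();"
	return CreateJobParameters{
		Name:                to.StringPtr("example U-SQL job"),
		DegreeOfParallelism: to.Int32Ptr(1),
		Priority:            to.Int32Ptr(1000),
		Type:                USQL,
		// CreateUSQLJobProperties satisfies BasicCreateJobProperties; its
		// MarshalJSON stamps the "USql" discriminator on serialization.
		Properties: CreateUSQLJobProperties{
			Script: &script,
			Type:   TypeBasicCreateJobPropertiesTypeUSQL,
		},
	}
}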
// UnmarshalJSON is the custom unmarshaler for CreateJobParameters struct.
func (cjp *CreateJobParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
cjp.Name = &name
}
case "degreeOfParallelism":
if v != nil {
var degreeOfParallelism int32
err = json.Unmarshal(*v, &degreeOfParallelism)
if err != nil {
return err
}
cjp.DegreeOfParallelism = &degreeOfParallelism
}
case "degreeOfParallelismPercent":
if v != nil {
var degreeOfParallelismPercent float64
err = json.Unmarshal(*v, &degreeOfParallelismPercent)
if err != nil {
return err
}
cjp.DegreeOfParallelismPercent = &degreeOfParallelismPercent
}
case "priority":
if v != nil {
var priority int32
err = json.Unmarshal(*v, &priority)
if err != nil {
return err
}
cjp.Priority = &priority
}
case "logFilePatterns":
if v != nil {
var logFilePatterns []string
err = json.Unmarshal(*v, &logFilePatterns)
if err != nil {
return err
}
cjp.LogFilePatterns = &logFilePatterns
}
case "related":
if v != nil {
var related RelationshipProperties
err = json.Unmarshal(*v, &related)
if err != nil {
return err
}
cjp.Related = &related
}
case "type":
if v != nil {
var typeVar TypeEnum
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
cjp.Type = typeVar
}
case "properties":
if v != nil {
properties, err := unmarshalBasicCreateJobProperties(*v)
if err != nil {
return err
}
cjp.Properties = properties
}
}
}
return nil
}
// BasicCreateJobProperties the common Data Lake Analytics job properties for job submission.
type BasicCreateJobProperties interface {
AsCreateUSQLJobProperties() (*CreateUSQLJobProperties, bool)
AsCreateScopeJobProperties() (*CreateScopeJobProperties, bool)
AsCreateJobProperties() (*CreateJobProperties, bool)
}
// CreateJobProperties the common Data Lake Analytics job properties for job submission.
type CreateJobProperties struct {
// RuntimeVersion - The runtime version of the Data Lake Analytics engine to use for the specific type of job being run.
RuntimeVersion *string `json:"runtimeVersion,omitempty"`
// Script - The script to run. Please note that the maximum script size is 3 MB.
Script *string `json:"script,omitempty"`
// Type - Possible values include: 'TypeBasicCreateJobPropertiesTypeCreateJobProperties', 'TypeBasicCreateJobPropertiesTypeUSQL', 'TypeBasicCreateJobPropertiesTypeScope'
Type TypeBasicCreateJobProperties `json:"type,omitempty"`
}
func unmarshalBasicCreateJobProperties(body []byte) (BasicCreateJobProperties, error) {
var m map[string]interface{}
err := json.Unmarshal(body, &m)
if err != nil {
return nil, err
}
switch m["type"] {
case string(TypeBasicCreateJobPropertiesTypeUSQL):
var cusjp CreateUSQLJobProperties
err := json.Unmarshal(body, &cusjp)
return cusjp, err
case string(TypeBasicCreateJobPropertiesTypeScope):
var csjp CreateScopeJobProperties
err := json.Unmarshal(body, &csjp)
return csjp, err
default:
var cjp CreateJobProperties
err := json.Unmarshal(body, &cjp)
return cjp, err
}
}
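// exampleDecodeCreateJobProperties is an illustrative sketch of how the "type"
// discriminator drives unmarshalBasicCreateJobProperties: a payload marked
// "USql" comes back as CreateUSQLJobProperties and can be narrowed with the
// As* helpers. The JSON literal is a hypothetical payload, not service output.
func exampleDecodeCreateJobProperties() (*CreateUSQLJobProperties, error) {
	raw := []byte(`{"type":"USql","script":"SELECT 1;","runtimeVersion":"default"}`)
	props, err := unmarshalBasicCreateJobProperties(raw)
	if err != nil {
		return nil, err
	}
	// AsCreateUSQLJobProperties reports whether the decoded value is the
	// U-SQL variant; other variants return (nil, false).
	if usql, ok := props.AsCreateUSQLJobProperties(); ok {
		return usql, nil
	}
	return nil, nil
}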
func unmarshalBasicCreateJobPropertiesArray(body []byte) ([]BasicCreateJobProperties, error) {
var rawMessages []*json.RawMessage
err := json.Unmarshal(body, &rawMessages)
if err != nil {
return nil, err
}
cjpArray := make([]BasicCreateJobProperties, len(rawMessages))
for index, rawMessage := range rawMessages {
cjp, err := unmarshalBasicCreateJobProperties(*rawMessage)
if err != nil {
return nil, err
}
cjpArray[index] = cjp
}
return cjpArray, nil
}
// MarshalJSON is the custom marshaler for CreateJobProperties.
func (cjp CreateJobProperties) MarshalJSON() ([]byte, error) {
cjp.Type = TypeBasicCreateJobPropertiesTypeCreateJobProperties
objectMap := make(map[string]interface{})
if cjp.RuntimeVersion != nil {
objectMap["runtimeVersion"] = cjp.RuntimeVersion
}
if cjp.Script != nil {
objectMap["script"] = cjp.Script
}
if cjp.Type != "" {
objectMap["type"] = cjp.Type
}
return json.Marshal(objectMap)
}
// AsCreateUSQLJobProperties is the BasicCreateJobProperties implementation for CreateJobProperties.
func (cjp CreateJobProperties) AsCreateUSQLJobProperties() (*CreateUSQLJobProperties, bool) {
return nil, false
}
// AsCreateScopeJobProperties is the BasicCreateJobProperties implementation for CreateJobProperties.
func (cjp CreateJobProperties) AsCreateScopeJobProperties() (*CreateScopeJobProperties, bool) {
return nil, false
}
// AsCreateJobProperties is the BasicCreateJobProperties implementation for CreateJobProperties.
func (cjp CreateJobProperties) AsCreateJobProperties() (*CreateJobProperties, bool) {
return &cjp, true
}
// AsBasicCreateJobProperties is the BasicCreateJobProperties implementation for CreateJobProperties.
func (cjp CreateJobProperties) AsBasicCreateJobProperties() (BasicCreateJobProperties, bool) {
return &cjp, true
}
// CreateScopeJobParameters the parameters used to submit a new Data Lake Analytics Scope job. (Only for
// use internally with Scope job type.)
type CreateScopeJobParameters struct {
// Tags - The key-value pairs used to add additional metadata to the job information.
Tags map[string]*string `json:"tags"`
// Name - The friendly name of the job to submit.
Name *string `json:"name,omitempty"`
// DegreeOfParallelism - The degree of parallelism to use for this job. At most one of degreeOfParallelism and degreeOfParallelismPercent should be specified. If none, a default value of 1 will be used for degreeOfParallelism.
DegreeOfParallelism *int32 `json:"degreeOfParallelism,omitempty"`
// DegreeOfParallelismPercent - The degree of parallelism, expressed as a percentage, to use for this job. At most one of degreeOfParallelism and degreeOfParallelismPercent should be specified. If none, a default value of 1 will be used for degreeOfParallelism.
DegreeOfParallelismPercent *float64 `json:"degreeOfParallelismPercent,omitempty"`
// Priority - The priority value to use for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0.
Priority *int32 `json:"priority,omitempty"`
// LogFilePatterns - The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. Example format: jobExecution*.log or *mylog*.txt
LogFilePatterns *[]string `json:"logFilePatterns,omitempty"`
// Related - The recurring job relationship information properties.
Related *RelationshipProperties `json:"related,omitempty"`
// Type - The job type of the current job (Hive, USql, or Scope (for internal use only)). Possible values include: 'USQL', 'Hive', 'Scope'
Type TypeEnum `json:"type,omitempty"`
// Properties - The job specific properties.
Properties BasicCreateJobProperties `json:"properties,omitempty"`
}
// MarshalJSON is the custom marshaler for CreateScopeJobParameters.
func (csjp CreateScopeJobParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if csjp.Tags != nil {
objectMap["tags"] = csjp.Tags
}
if csjp.Name != nil {
objectMap["name"] = csjp.Name
}
if csjp.DegreeOfParallelism != nil {
objectMap["degreeOfParallelism"] = csjp.DegreeOfParallelism
}
if csjp.DegreeOfParallelismPercent != nil {
objectMap["degreeOfParallelismPercent"] = csjp.DegreeOfParallelismPercent
}
if csjp.Priority != nil {
objectMap["priority"] = csjp.Priority
}
if csjp.LogFilePatterns != nil {
objectMap["logFilePatterns"] = csjp.LogFilePatterns
}
if csjp.Related != nil {
objectMap["related"] = csjp.Related
}
if csjp.Type != "" {
objectMap["type"] = csjp.Type
}
objectMap["properties"] = csjp.Properties
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for CreateScopeJobParameters struct.
func (csjp *CreateScopeJobParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
csjp.Tags = tags
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
csjp.Name = &name
}
case "degreeOfParallelism":
if v != nil {
var degreeOfParallelism int32
err = json.Unmarshal(*v, &degreeOfParallelism)
if err != nil {
return err
}
csjp.DegreeOfParallelism = &degreeOfParallelism
}
case "degreeOfParallelismPercent":
if v != nil {
var degreeOfParallelismPercent float64
err = json.Unmarshal(*v, &degreeOfParallelismPercent)
if err != nil {
return err
}
csjp.DegreeOfParallelismPercent = &degreeOfParallelismPercent
}
case "priority":
if v != nil {
var priority int32
err = json.Unmarshal(*v, &priority)
if err != nil {
return err
}
csjp.Priority = &priority
}
case "logFilePatterns":
if v != nil {
var logFilePatterns []string
err = json.Unmarshal(*v, &logFilePatterns)
if err != nil {
return err
}
csjp.LogFilePatterns = &logFilePatterns
}
case "related":
if v != nil {
var related RelationshipProperties
err = json.Unmarshal(*v, &related)
if err != nil {
return err
}
csjp.Related = &related
}
case "type":
if v != nil {
var typeVar TypeEnum
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
csjp.Type = typeVar
}
case "properties":
if v != nil {
properties, err := unmarshalBasicCreateJobProperties(*v)
if err != nil {
return err
}
csjp.Properties = properties
}
}
}
return nil
}
// CreateScopeJobProperties scope job properties used when submitting Scope jobs. (Only for use internally
// with Scope job type.)
type CreateScopeJobProperties struct {
// Resources - The list of resources that are required by the job.
Resources *[]ScopeJobResource `json:"resources,omitempty"`
// Notifier - The list of email addresses, separated by semi-colons, to notify when the job reaches a terminal state.
Notifier *string `json:"notifier,omitempty"`
// RuntimeVersion - The runtime version of the Data Lake Analytics engine to use for the specific type of job being run.
RuntimeVersion *string `json:"runtimeVersion,omitempty"`
// Script - The script to run. Please note that the maximum script size is 3 MB.
Script *string `json:"script,omitempty"`
// Type - Possible values include: 'TypeBasicCreateJobPropertiesTypeCreateJobProperties', 'TypeBasicCreateJobPropertiesTypeUSQL', 'TypeBasicCreateJobPropertiesTypeScope'
Type TypeBasicCreateJobProperties `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for CreateScopeJobProperties.
func (csjp CreateScopeJobProperties) MarshalJSON() ([]byte, error) {
csjp.Type = TypeBasicCreateJobPropertiesTypeScope
objectMap := make(map[string]interface{})
if csjp.Resources != nil {
objectMap["resources"] = csjp.Resources
}
if csjp.Notifier != nil {
objectMap["notifier"] = csjp.Notifier
}
if csjp.RuntimeVersion != nil {
objectMap["runtimeVersion"] = csjp.RuntimeVersion
}
if csjp.Script != nil {
objectMap["script"] = csjp.Script
}
if csjp.Type != "" {
objectMap["type"] = csjp.Type
}
return json.Marshal(objectMap)
}
// AsCreateUSQLJobProperties is the BasicCreateJobProperties implementation for CreateScopeJobProperties.
func (csjp CreateScopeJobProperties) AsCreateUSQLJobProperties() (*CreateUSQLJobProperties, bool) {
return nil, false
}
// AsCreateScopeJobProperties is the BasicCreateJobProperties implementation for CreateScopeJobProperties.
func (csjp CreateScopeJobProperties) AsCreateScopeJobProperties() (*CreateScopeJobProperties, bool) {
return &csjp, true
}
// AsCreateJobProperties is the BasicCreateJobProperties implementation for CreateScopeJobProperties.
func (csjp CreateScopeJobProperties) AsCreateJobProperties() (*CreateJobProperties, bool) {
return nil, false
}
// AsBasicCreateJobProperties is the BasicCreateJobProperties implementation for CreateScopeJobProperties.
func (csjp CreateScopeJobProperties) AsBasicCreateJobProperties() (BasicCreateJobProperties, bool) {
return &csjp, true
}
// CreateUSQLJobProperties u-SQL job properties used when submitting U-SQL jobs.
type CreateUSQLJobProperties struct {
// CompileMode - The specific compilation mode for the job used during execution. If this is not specified during submission, the server will determine the optimal compilation mode. Possible values include: 'Semantic', 'Full', 'SingleBox'
CompileMode CompileMode `json:"compileMode,omitempty"`
// RuntimeVersion - The runtime version of the Data Lake Analytics engine to use for the specific type of job being run.
RuntimeVersion *string `json:"runtimeVersion,omitempty"`
// Script - The script to run. Please note that the maximum script size is 3 MB.
Script *string `json:"script,omitempty"`
// Type - Possible values include: 'TypeBasicCreateJobPropertiesTypeCreateJobProperties', 'TypeBasicCreateJobPropertiesTypeUSQL', 'TypeBasicCreateJobPropertiesTypeScope'
Type TypeBasicCreateJobProperties `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for CreateUSQLJobProperties.
func (cusjp CreateUSQLJobProperties) MarshalJSON() ([]byte, error) {
cusjp.Type = TypeBasicCreateJobPropertiesTypeUSQL
objectMap := make(map[string]interface{})
if cusjp.CompileMode != "" {
objectMap["compileMode"] = cusjp.CompileMode
}
if cusjp.RuntimeVersion != nil {
objectMap["runtimeVersion"] = cusjp.RuntimeVersion
}
if cusjp.Script != nil {
objectMap["script"] = cusjp.Script
}
if cusjp.Type != "" {
objectMap["type"] = cusjp.Type
}
return json.Marshal(objectMap)
}
// AsCreateUSQLJobProperties is the BasicCreateJobProperties implementation for CreateUSQLJobProperties.
func (cusjp CreateUSQLJobProperties) AsCreateUSQLJobProperties() (*CreateUSQLJobProperties, bool) {
return &cusjp, true
}
// AsCreateScopeJobProperties is the BasicCreateJobProperties implementation for CreateUSQLJobProperties.
func (cusjp CreateUSQLJobProperties) AsCreateScopeJobProperties() (*CreateScopeJobProperties, bool) {
return nil, false
}
// AsCreateJobProperties is the BasicCreateJobProperties implementation for CreateUSQLJobProperties.
func (cusjp CreateUSQLJobProperties) AsCreateJobProperties() (*CreateJobProperties, bool) {
return nil, false
}
// AsBasicCreateJobProperties is the BasicCreateJobProperties implementation for CreateUSQLJobProperties.
func (cusjp CreateUSQLJobProperties) AsBasicCreateJobProperties() (BasicCreateJobProperties, bool) {
return &cusjp, true
}
// DataPath a Data Lake Analytics job data path item.
type DataPath struct {
autorest.Response `json:"-"`
// JobID - READ-ONLY; The ID of the job this data is for.
JobID *uuid.UUID `json:"jobId,omitempty"`
// Command - READ-ONLY; The command that this job data relates to.
Command *string `json:"command,omitempty"`
// Paths - READ-ONLY; The list of paths to all of the job data.
Paths *[]string `json:"paths,omitempty"`
}
// Diagnostics error diagnostic information for failed jobs.
type Diagnostics struct {
// Message - READ-ONLY; The error message.
Message *string `json:"message,omitempty"`
// Severity - READ-ONLY; The severity of the error. Possible values include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning'
Severity SeverityTypes `json:"severity,omitempty"`
// LineNumber - READ-ONLY; The line number the error occurred on.
LineNumber *int32 `json:"lineNumber,omitempty"`
// ColumnNumber - READ-ONLY; The column where the error occurred.
ColumnNumber *int32 `json:"columnNumber,omitempty"`
// Start - READ-ONLY; The starting index of the error.
Start *int32 `json:"start,omitempty"`
// End - READ-ONLY; The ending index of the error.
End *int32 `json:"end,omitempty"`
}
// ErrorDetails the Data Lake Analytics job error details.
type ErrorDetails struct {
// ErrorID - READ-ONLY; The specific identifier for the type of error encountered in the job.
ErrorID *string `json:"errorId,omitempty"`
// Severity - READ-ONLY; The severity level of the failure. Possible values include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning'
Severity SeverityTypes `json:"severity,omitempty"`
// Source - READ-ONLY; The ultimate source of the failure (usually either SYSTEM or USER).
Source *string `json:"source,omitempty"`
// Message - READ-ONLY; The user friendly error message for the failure.
Message *string `json:"message,omitempty"`
// Description - READ-ONLY; The error message description.
Description *string `json:"description,omitempty"`
// Details - READ-ONLY; The details of the error message.
Details *string `json:"details,omitempty"`
// LineNumber - READ-ONLY; The specific line number in the job where the error occurred.
LineNumber *int32 `json:"lineNumber,omitempty"`
// StartOffset - READ-ONLY; The start offset in the job where the error was found.
StartOffset *int32 `json:"startOffset,omitempty"`
// EndOffset - READ-ONLY; The end offset in the job where the error was found.
EndOffset *int32 `json:"endOffset,omitempty"`
// Resolution - READ-ONLY; The recommended resolution for the failure, if any.
Resolution *string `json:"resolution,omitempty"`
// FilePath - READ-ONLY; The path to any supplemental error files, if any.
FilePath *string `json:"filePath,omitempty"`
// HelpLink - READ-ONLY; The link to MSDN or Azure help for this type of error, if any.
HelpLink *string `json:"helpLink,omitempty"`
// InternalDiagnostics - READ-ONLY; The internal diagnostic stack trace. It is retrieved only if the user requesting the job error details has sufficient permissions; otherwise it is empty.
InternalDiagnostics *string `json:"internalDiagnostics,omitempty"`
// InnerError - READ-ONLY; The inner error of this specific job error message, if any.
InnerError *InnerError `json:"innerError,omitempty"`
}
// HiveJobProperties hive job properties used when retrieving Hive jobs.
type HiveJobProperties struct {
// LogsLocation - READ-ONLY; The Hive logs location.
LogsLocation *string `json:"logsLocation,omitempty"`
// OutputLocation - READ-ONLY; The location of Hive job output files (both execution output and results).
OutputLocation *string `json:"outputLocation,omitempty"`
// StatementCount - READ-ONLY; The number of statements that will be run based on the script.
StatementCount *int32 `json:"statementCount,omitempty"`
// ExecutedStatementCount - READ-ONLY; The number of statements that have been run based on the script.
ExecutedStatementCount *int32 `json:"executedStatementCount,omitempty"`
// RuntimeVersion - The runtime version of the Data Lake Analytics engine to use for the specific type of job being run.
RuntimeVersion *string `json:"runtimeVersion,omitempty"`
// Script - The script to run. Please note that the maximum script size is 3 MB.
Script *string `json:"script,omitempty"`
// Type - Possible values include: 'TypeJobProperties', 'TypeUSQL', 'TypeHive', 'TypeScope'
Type Type `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for HiveJobProperties.
func (hjp HiveJobProperties) MarshalJSON() ([]byte, error) {
hjp.Type = TypeHive
objectMap := make(map[string]interface{})
if hjp.RuntimeVersion != nil {
objectMap["runtimeVersion"] = hjp.RuntimeVersion
}
if hjp.Script != nil {
objectMap["script"] = hjp.Script
}
if hjp.Type != "" {
objectMap["type"] = hjp.Type
}
return json.Marshal(objectMap)
}
// AsUSQLJobProperties is the BasicProperties implementation for HiveJobProperties.
func (hjp HiveJobProperties) AsUSQLJobProperties() (*USQLJobProperties, bool) {
return nil, false
}
// AsHiveJobProperties is the BasicProperties implementation for HiveJobProperties.
func (hjp HiveJobProperties) AsHiveJobProperties() (*HiveJobProperties, bool) {
return &hjp, true
}
// AsScopeJobProperties is the BasicProperties implementation for HiveJobProperties.
func (hjp HiveJobProperties) AsScopeJobProperties() (*ScopeJobProperties, bool) {
return nil, false
}
// AsProperties is the BasicProperties implementation for HiveJobProperties.
func (hjp HiveJobProperties) AsProperties() (*Properties, bool) {
return nil, false
}
// AsBasicProperties is the BasicProperties implementation for HiveJobProperties.
func (hjp HiveJobProperties) AsBasicProperties() (BasicProperties, bool) {
return &hjp, true
}
// InfoListResult list of JobInfo items.
type InfoListResult struct {
autorest.Response `json:"-"`
// Value - READ-ONLY; The list of JobInfo items.
Value *[]InformationBasic `json:"value,omitempty"`
// NextLink - READ-ONLY; The link (url) to the next page of results.
NextLink *string `json:"nextLink,omitempty"`
}
// InfoListResultIterator provides access to a complete listing of InformationBasic values.
type InfoListResultIterator struct {
i int
page InfoListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *InfoListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InfoListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *InfoListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter InfoListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter InfoListResultIterator) Response() InfoListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter InfoListResultIterator) Value() InformationBasic {
if !iter.page.NotDone() {
return InformationBasic{}
}
return iter.page.Values()[iter.i]
}
// Creates a new instance of the InfoListResultIterator type.
func NewInfoListResultIterator(page InfoListResultPage) InfoListResultIterator {
return InfoListResultIterator{page: page}
}
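// exampleCollectJobNames is an illustrative sketch of consuming an
// InfoListResultIterator: NotDone gates the loop, Value yields the current
// InformationBasic, and NextWithContext fetches further pages as needed. The
// iterator is assumed to come from the package's list operation.
func exampleCollectJobNames(ctx context.Context, iter InfoListResultIterator) ([]string, error) {
	var names []string
	for iter.NotDone() {
		if info := iter.Value(); info.Name != nil {
			names = append(names, *info.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return names, nil
}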
// IsEmpty returns true if the ListResult contains no values.
func (ilr InfoListResult) IsEmpty() bool {
return ilr.Value == nil || len(*ilr.Value) == 0
}
// infoListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (ilr InfoListResult) infoListResultPreparer(ctx context.Context) (*http.Request, error) {
if ilr.NextLink == nil || len(to.String(ilr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(ilr.NextLink)))
}
// InfoListResultPage contains a page of InformationBasic values.
type InfoListResultPage struct {
fn func(context.Context, InfoListResult) (InfoListResult, error)
ilr InfoListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *InfoListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InfoListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.ilr)
if err != nil {
return err
}
page.ilr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *InfoListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page InfoListResultPage) NotDone() bool {
return !page.ilr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page InfoListResultPage) Response() InfoListResult {
return page.ilr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page InfoListResultPage) Values() []InformationBasic {
if page.ilr.IsEmpty() {
return nil
}
return *page.ilr.Value
}
// Creates a new instance of the InfoListResultPage type.
func NewInfoListResultPage(getNextPage func(context.Context, InfoListResult) (InfoListResult, error)) InfoListResultPage {
return InfoListResultPage{fn: getNextPage}
}
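// exampleCountListedJobs is an illustrative sketch of page-based consumption,
// the lower-level alternative to the iterator above: Values returns the
// current page's slice and NextWithContext advances until the result set is
// exhausted. The page is assumed to come from the package's list operation.
func exampleCountListedJobs(ctx context.Context, page InfoListResultPage) (int, error) {
	total := 0
	for page.NotDone() {
		total += len(page.Values())
		if err := page.NextWithContext(ctx); err != nil {
			return total, err
		}
	}
	return total, nil
}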
// Information the extended Data Lake Analytics job information properties returned when retrieving a
// specific job.
type Information struct {
autorest.Response `json:"-"`
// ErrorMessage - READ-ONLY; The error message details for the job, if the job failed.
ErrorMessage *[]ErrorDetails `json:"errorMessage,omitempty"`
// StateAuditRecords - READ-ONLY; The job state audit records, indicating when various operations have been performed on this job.
StateAuditRecords *[]StateAuditRecord `json:"stateAuditRecords,omitempty"`
// Properties - The job specific properties.
Properties BasicProperties `json:"properties,omitempty"`
// JobID - READ-ONLY; The job's unique identifier (a GUID).
JobID *uuid.UUID `json:"jobId,omitempty"`
// Name - The friendly name of the job.
Name *string `json:"name,omitempty"`
// Type - The job type of the current job (Hive, USql, or Scope (for internal use only)). Possible values include: 'USQL', 'Hive', 'Scope'
Type TypeEnum `json:"type,omitempty"`
// Submitter - READ-ONLY; The user or account that submitted the job.
Submitter *string `json:"submitter,omitempty"`
// DegreeOfParallelism - The degree of parallelism used for this job.
DegreeOfParallelism *int32 `json:"degreeOfParallelism,omitempty"`
// DegreeOfParallelismPercent - READ-ONLY; The degree of parallelism, expressed as a percentage, used for this job.
DegreeOfParallelismPercent *float64 `json:"degreeOfParallelismPercent,omitempty"`
// Priority - The priority value for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0.
Priority *int32 `json:"priority,omitempty"`
// SubmitTime - READ-ONLY; The time the job was submitted to the service.
SubmitTime *date.Time `json:"submitTime,omitempty"`
// StartTime - READ-ONLY; The start time of the job.
StartTime *date.Time `json:"startTime,omitempty"`
// EndTime - READ-ONLY; The completion time of the job.
EndTime *date.Time `json:"endTime,omitempty"`
// State - READ-ONLY; The job state. When the job is in the Ended state, refer to Result and ErrorMessage for details. Possible values include: 'StateAccepted', 'StateCompiling', 'StateEnded', 'StateNew', 'StateQueued', 'StateRunning', 'StateScheduling', 'StateStarting', 'StatePaused', 'StateWaitingForCapacity', 'StateYielded', 'StateFinalizing'
State State `json:"state,omitempty"`
// Result - READ-ONLY; The result of job execution or the current result of the running job. Possible values include: 'None', 'Succeeded', 'Cancelled', 'Failed'
Result Result `json:"result,omitempty"`
// LogFolder - READ-ONLY; The log folder path to use in the following format: adl://<accountName>.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/.
LogFolder *string `json:"logFolder,omitempty"`
// LogFilePatterns - The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. Example format: jobExecution*.log or *mylog*.txt
LogFilePatterns *[]string `json:"logFilePatterns,omitempty"`
// Related - The recurring job relationship information properties.
Related *RelationshipProperties `json:"related,omitempty"`
// Tags - The key-value pairs used to add additional metadata to the job information. (Only for use internally with Scope job type.)
Tags map[string]*string `json:"tags"`
// HierarchyQueueNode - READ-ONLY; The name of the hierarchy queue node this job is assigned to; null if the job has not been assigned yet or the account doesn't have a hierarchy queue.
HierarchyQueueNode *string `json:"hierarchyQueueNode,omitempty"`
}
// MarshalJSON is the custom marshaler for Information.
func (i Information) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
objectMap["properties"] = i.Properties
if i.Name != nil {
objectMap["name"] = i.Name
}
if i.Type != "" {
objectMap["type"] = i.Type
}
if i.DegreeOfParallelism != nil {
objectMap["degreeOfParallelism"] = i.DegreeOfParallelism
}
if i.Priority != nil {
objectMap["priority"] = i.Priority
}
if i.LogFilePatterns != nil {
objectMap["logFilePatterns"] = i.LogFilePatterns
}
if i.Related != nil {
objectMap["related"] = i.Related
}
if i.Tags != nil {
objectMap["tags"] = i.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Information struct.
func (i *Information) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "errorMessage":
if v != nil {
var errorMessage []ErrorDetails
err = json.Unmarshal(*v, &errorMessage)
if err != nil {
return err
}
i.ErrorMessage = &errorMessage
}
case "stateAuditRecords":
if v != nil {
var stateAuditRecords []StateAuditRecord
err = json.Unmarshal(*v, &stateAuditRecords)
if err != nil {
return err
}
i.StateAuditRecords = &stateAuditRecords
}
case "properties":
if v != nil {
properties, err := unmarshalBasicProperties(*v)
if err != nil {
return err
}
i.Properties = properties
}
case "jobId":
if v != nil {
var jobID uuid.UUID
err = json.Unmarshal(*v, &jobID)
if err != nil {
return err
}
i.JobID = &jobID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
i.Name = &name
}
case "type":
if v != nil {
var typeVar TypeEnum
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
i.Type = typeVar
}
case "submitter":
if v != nil {
var submitter string
err = json.Unmarshal(*v, &submitter)
if err != nil {
return err
}
i.Submitter = &submitter
}
case "degreeOfParallelism":
if v != nil {
var degreeOfParallelism int32
err = json.Unmarshal(*v, &degreeOfParallelism)
if err != nil {
return err
}
i.DegreeOfParallelism = &degreeOfParallelism
}
case "degreeOfParallelismPercent":
if v != nil {
var degreeOfParallelismPercent float64
err = json.Unmarshal(*v, &degreeOfParallelismPercent)
if err != nil {
return err
}
i.DegreeOfParallelismPercent = &degreeOfParallelismPercent
}
case "priority":
if v != nil {
var priority int32
err = json.Unmarshal(*v, &priority)
if err != nil {
return err
}
i.Priority = &priority
}
case "submitTime":
if v != nil {
var submitTime date.Time
err = json.Unmarshal(*v, &submitTime)
if err != nil {
return err
}
i.SubmitTime = &submitTime
}
case "startTime":
if v != nil {
var startTime date.Time
err = json.Unmarshal(*v, &startTime)
if err != nil {
return err
}
i.StartTime = &startTime
}
case "endTime":
if v != nil {
var endTime date.Time
err = json.Unmarshal(*v, &endTime)
if err != nil {
return err
}
i.EndTime = &endTime
}
case "state":
if v != nil {
var state State
err = json.Unmarshal(*v, &state)
if err != nil {
return err
}
i.State = state
}
case "result":
if v != nil {
var resultVar Result
err = json.Unmarshal(*v, &resultVar)
if err != nil {
return err
}
i.Result = resultVar
}
case "logFolder":
if v != nil {
var logFolder string
err = json.Unmarshal(*v, &logFolder)
if err != nil {
return err
}
i.LogFolder = &logFolder
}
case "logFilePatterns":
if v != nil {
var logFilePatterns []string
err = json.Unmarshal(*v, &logFilePatterns)
if err != nil {
return err
}
i.LogFilePatterns = &logFilePatterns
}
case "related":
if v != nil {
var related RelationshipProperties
err = json.Unmarshal(*v, &related)
if err != nil {
return err
}
i.Related = &related
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
i.Tags = tags
}
case "hierarchyQueueNode":
if v != nil {
var hierarchyQueueNode string
err = json.Unmarshal(*v, &hierarchyQueueNode)
if err != nil {
return err
}
i.HierarchyQueueNode = &hierarchyQueueNode
}
}
}
return nil
}
// InformationBasic the common Data Lake Analytics job information properties.
type InformationBasic struct {
// JobID - READ-ONLY; The job's unique identifier (a GUID).
JobID *uuid.UUID `json:"jobId,omitempty"`
// Name - The friendly name of the job.
Name *string `json:"name,omitempty"`
// Type - The job type of the current job (Hive, USql, or Scope (for internal use only)). Possible values include: 'USQL', 'Hive', 'Scope'
Type TypeEnum `json:"type,omitempty"`
// Submitter - READ-ONLY; The user or account that submitted the job.
Submitter *string `json:"submitter,omitempty"`
// DegreeOfParallelism - The degree of parallelism used for this job.
DegreeOfParallelism *int32 `json:"degreeOfParallelism,omitempty"`
// DegreeOfParallelismPercent - READ-ONLY; The degree of parallelism, expressed as a percentage, used for this job.
DegreeOfParallelismPercent *float64 `json:"degreeOfParallelismPercent,omitempty"`
// Priority - The priority value for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0.
Priority *int32 `json:"priority,omitempty"`
// SubmitTime - READ-ONLY; The time the job was submitted to the service.
SubmitTime *date.Time `json:"submitTime,omitempty"`
// StartTime - READ-ONLY; The start time of the job.
StartTime *date.Time `json:"startTime,omitempty"`
// EndTime - READ-ONLY; The completion time of the job.
EndTime *date.Time `json:"endTime,omitempty"`
// State - READ-ONLY; The job state. When the job is in the Ended state, refer to Result and ErrorMessage for details. Possible values include: 'StateAccepted', 'StateCompiling', 'StateEnded', 'StateNew', 'StateQueued', 'StateRunning', 'StateScheduling', 'StateStarting', 'StatePaused', 'StateWaitingForCapacity', 'StateYielded', 'StateFinalizing'
State State `json:"state,omitempty"`
// Result - READ-ONLY; The result of job execution or the current result of the running job. Possible values include: 'None', 'Succeeded', 'Cancelled', 'Failed'
Result Result `json:"result,omitempty"`
// LogFolder - READ-ONLY; The log folder path to use in the following format: adl://<accountName>.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/.
LogFolder *string `json:"logFolder,omitempty"`
// LogFilePatterns - The list of log file name patterns to find in the logFolder. '*' is the only matching character allowed. Example format: jobExecution*.log or *mylog*.txt
LogFilePatterns *[]string `json:"logFilePatterns,omitempty"`
// Related - The recurring job relationship information properties.
Related *RelationshipProperties `json:"related,omitempty"`
// Tags - The key-value pairs used to add additional metadata to the job information. (Only for use internally with Scope job type.)
Tags map[string]*string `json:"tags"`
// HierarchyQueueNode - READ-ONLY; The name of the hierarchy queue node this job is assigned to; null if the job has not been assigned yet or the account doesn't have a hierarchy queue.
HierarchyQueueNode *string `json:"hierarchyQueueNode,omitempty"`
}
// MarshalJSON is the custom marshaler for InformationBasic.
func (ib InformationBasic) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if ib.Name != nil {
objectMap["name"] = ib.Name
}
if ib.Type != "" {
objectMap["type"] = ib.Type
}
if ib.DegreeOfParallelism != nil {
objectMap["degreeOfParallelism"] = ib.DegreeOfParallelism
}
if ib.Priority != nil {
objectMap["priority"] = ib.Priority
}
if ib.LogFilePatterns != nil {
objectMap["logFilePatterns"] = ib.LogFilePatterns
}
if ib.Related != nil {
objectMap["related"] = ib.Related
}
if ib.Tags != nil {
objectMap["tags"] = ib.Tags
}
return json.Marshal(objectMap)
}
// InnerError the Data Lake Analytics job error details.
type InnerError struct {
// ErrorID - READ-ONLY; The specific identifier for the type of error encountered in the job.
ErrorID *string `json:"errorId,omitempty"`
// Severity - READ-ONLY; The severity level of the failure. Possible values include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning'
Severity SeverityTypes `json:"severity,omitempty"`
// Source - READ-ONLY; The ultimate source of the failure (usually either SYSTEM or USER).
Source *string `json:"source,omitempty"`
// Message - READ-ONLY; The user friendly error message for the failure.
Message *string `json:"message,omitempty"`
// Description - READ-ONLY; The error message description.
Description *string `json:"description,omitempty"`
// Details - READ-ONLY; The details of the error message.
Details *string `json:"details,omitempty"`
// DiagnosticCode - READ-ONLY; The diagnostic error code.
DiagnosticCode *int32 `json:"diagnosticCode,omitempty"`
// Component - READ-ONLY; The component that failed.
Component *string `json:"component,omitempty"`
// Resolution - READ-ONLY; The recommended resolution for the failure, if any.
Resolution *string `json:"resolution,omitempty"`
// HelpLink - READ-ONLY; The link to MSDN or Azure help for this type of error, if any.
HelpLink *string `json:"helpLink,omitempty"`
// InternalDiagnostics - READ-ONLY; The internal diagnostic stack trace. It is retrieved only if the user requesting the job error details has sufficient permissions; otherwise it is empty.
InternalDiagnostics *string `json:"internalDiagnostics,omitempty"`
// InnerError - READ-ONLY; The inner error of this specific job error message, if any.
InnerError *InnerError `json:"innerError,omitempty"`
}
// PipelineInformation job Pipeline Information, showing the relationship of jobs and recurrences of those
// jobs in a pipeline.
type PipelineInformation struct {
autorest.Response `json:"-"`
// PipelineID - READ-ONLY; The job relationship pipeline identifier (a GUID).
PipelineID *uuid.UUID `json:"pipelineId,omitempty"`
// PipelineName - READ-ONLY; The friendly name of the job relationship pipeline, which does not need to be unique.
PipelineName *string `json:"pipelineName,omitempty"`
// PipelineURI - READ-ONLY; The unique pipeline URI, which links to the originating service for this pipeline.
PipelineURI *string `json:"pipelineUri,omitempty"`
// NumJobsFailed - READ-ONLY; The number of jobs in this pipeline that have failed.
NumJobsFailed *int32 `json:"numJobsFailed,omitempty"`
// NumJobsCanceled - READ-ONLY; The number of jobs in this pipeline that have been canceled.
NumJobsCanceled *int32 `json:"numJobsCanceled,omitempty"`
// NumJobsSucceeded - READ-ONLY; The number of jobs in this pipeline that have succeeded.
NumJobsSucceeded *int32 `json:"numJobsSucceeded,omitempty"`
// AuHoursFailed - READ-ONLY; The number of job execution hours that resulted in failed jobs.
AuHoursFailed *float64 `json:"auHoursFailed,omitempty"`
// AuHoursCanceled - READ-ONLY; The number of job execution hours that resulted in canceled jobs.
AuHoursCanceled *float64 `json:"auHoursCanceled,omitempty"`
// AuHoursSucceeded - READ-ONLY; The number of job execution hours that resulted in successful jobs.
AuHoursSucceeded *float64 `json:"auHoursSucceeded,omitempty"`
// LastSubmitTime - READ-ONLY; The last time a job in this pipeline was submitted.
LastSubmitTime *date.Time `json:"lastSubmitTime,omitempty"`
// Runs - READ-ONLY; The list of run information items, one for each run of this pipeline.
Runs *[]PipelineRunInformation `json:"runs,omitempty"`
// Recurrences - READ-ONLY; The list of recurrence identifiers representing each run of this pipeline.
Recurrences *[]uuid.UUID `json:"recurrences,omitempty"`
}
// PipelineInformationListResult list of job pipeline information items.
type PipelineInformationListResult struct {
autorest.Response `json:"-"`
// Value - READ-ONLY; The list of job pipeline information items.
Value *[]PipelineInformation `json:"value,omitempty"`
// NextLink - READ-ONLY; The link (url) to the next page of results.
NextLink *string `json:"nextLink,omitempty"`
}
// PipelineInformationListResultIterator provides access to a complete listing of PipelineInformation
// values.
type PipelineInformationListResultIterator struct {
i int
page PipelineInformationListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *PipelineInformationListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PipelineInformationListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *PipelineInformationListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter PipelineInformationListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter PipelineInformationListResultIterator) Response() PipelineInformationListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter PipelineInformationListResultIterator) Value() PipelineInformation {
if !iter.page.NotDone() {
return PipelineInformation{}
}
return iter.page.Values()[iter.i]
}
// Creates a new instance of the PipelineInformationListResultIterator type.
func NewPipelineInformationListResultIterator(page PipelineInformationListResultPage) PipelineInformationListResultIterator {
return PipelineInformationListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (pilr PipelineInformationListResult) IsEmpty() bool {
return pilr.Value == nil || len(*pilr.Value) == 0
}
// pipelineInformationListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (pilr PipelineInformationListResult) pipelineInformationListResultPreparer(ctx context.Context) (*http.Request, error) {
if pilr.NextLink == nil || len(to.String(pilr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(pilr.NextLink)))
}
// PipelineInformationListResultPage contains a page of PipelineInformation values.
type PipelineInformationListResultPage struct {
fn func(context.Context, PipelineInformationListResult) (PipelineInformationListResult, error)
pilr PipelineInformationListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *PipelineInformationListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PipelineInformationListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.pilr)
if err != nil {
return err
}
page.pilr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *PipelineInformationListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page PipelineInformationListResultPage) NotDone() bool {
return !page.pilr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page PipelineInformationListResultPage) Response() PipelineInformationListResult {
return page.pilr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page PipelineInformationListResultPage) Values() []PipelineInformation {
if page.pilr.IsEmpty() {
return nil
}
return *page.pilr.Value
}
// Creates a new instance of the PipelineInformationListResultPage type.
func NewPipelineInformationListResultPage(getNextPage func(context.Context, PipelineInformationListResult) (PipelineInformationListResult, error)) PipelineInformationListResultPage {
return PipelineInformationListResultPage{fn: getNextPage}
}
// PipelineRunInformation run info for a specific job pipeline.
type PipelineRunInformation struct {
// RunID - READ-ONLY; The run identifier of an instance of pipeline executions (a GUID).
RunID *uuid.UUID `json:"runId,omitempty"`
// LastSubmitTime - READ-ONLY; The time this instance was last submitted.
LastSubmitTime *date.Time `json:"lastSubmitTime,omitempty"`
}
// BasicProperties the common Data Lake Analytics job properties.
type BasicProperties interface {
AsUSQLJobProperties() (*USQLJobProperties, bool)
AsHiveJobProperties() (*HiveJobProperties, bool)
AsScopeJobProperties() (*ScopeJobProperties, bool)
AsProperties() (*Properties, bool)
}
// Properties the common Data Lake Analytics job properties.
type Properties struct {
// RuntimeVersion - The runtime version of the Data Lake Analytics engine to use for the specific type of job being run.
RuntimeVersion *string `json:"runtimeVersion,omitempty"`
// Script - The script to run. Please note that the maximum script size is 3 MB.
Script *string `json:"script,omitempty"`
// Type - Possible values include: 'TypeJobProperties', 'TypeUSQL', 'TypeHive', 'TypeScope'
Type Type `json:"type,omitempty"`
}
func unmarshalBasicProperties(body []byte) (BasicProperties, error) {
var m map[string]interface{}
err := json.Unmarshal(body, &m)
if err != nil {
return nil, err
}
switch m["type"] {
case string(TypeUSQL):
var usjp USQLJobProperties
err := json.Unmarshal(body, &usjp)
return usjp, err
case string(TypeHive):
var hjp HiveJobProperties
err := json.Unmarshal(body, &hjp)
return hjp, err
case string(TypeScope):
var sjp ScopeJobProperties
err := json.Unmarshal(body, &sjp)
return sjp, err
default:
var p Properties
err := json.Unmarshal(body, &p)
return p, err
}
}
func unmarshalBasicPropertiesArray(body []byte) ([]BasicProperties, error) {
var rawMessages []*json.RawMessage
err := json.Unmarshal(body, &rawMessages)
if err != nil {
return nil, err
}
pArray := make([]BasicProperties, len(rawMessages))
for index, rawMessage := range rawMessages {
p, err := unmarshalBasicProperties(*rawMessage)
if err != nil {
return nil, err
}
pArray[index] = p
}
return pArray, nil
}
// MarshalJSON is the custom marshaler for Properties.
func (p Properties) MarshalJSON() ([]byte, error) {
p.Type = TypeJobProperties
objectMap := make(map[string]interface{})
if p.RuntimeVersion != nil {
objectMap["runtimeVersion"] = p.RuntimeVersion
}
if p.Script != nil {
objectMap["script"] = p.Script
}
if p.Type != "" {
objectMap["type"] = p.Type
}
return json.Marshal(objectMap)
}
// AsUSQLJobProperties is the BasicProperties implementation for Properties.
func (p Properties) AsUSQLJobProperties() (*USQLJobProperties, bool) {
return nil, false
}
// AsHiveJobProperties is the BasicProperties implementation for Properties.
func (p Properties) AsHiveJobProperties() (*HiveJobProperties, bool) {
return nil, false
}
// AsScopeJobProperties is the BasicProperties implementation for Properties.
func (p Properties) AsScopeJobProperties() (*ScopeJobProperties, bool) {
return nil, false
}
// AsProperties is the BasicProperties implementation for Properties.
func (p Properties) AsProperties() (*Properties, bool) {
return &p, true
}
// AsBasicProperties is the BasicProperties implementation for Properties.
func (p Properties) AsBasicProperties() (BasicProperties, bool) {
return &p, true
}
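// describeJobType is a hypothetical sketch (not generated code) showing how a caller
// might branch on the concrete type behind a BasicProperties value returned by the
// service, using the As* helpers defined on each implementation.
func describeJobType(props BasicProperties) string {
	if usql, ok := props.AsUSQLJobProperties(); ok {
		return "U-SQL job, compile mode: " + string(usql.CompileMode)
	}
	if _, ok := props.AsHiveJobProperties(); ok {
		return "Hive job"
	}
	if _, ok := props.AsScopeJobProperties(); ok {
		return "Scope job (internal use only)"
	}
	return "generic job properties"
}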
// RecurrenceInformation recurrence job information for a specific recurrence.
type RecurrenceInformation struct {
autorest.Response `json:"-"`
// RecurrenceID - READ-ONLY; The recurrence identifier (a GUID), unique per activity/script, regardless of iterations. This is used to link different occurrences of the same job together.
RecurrenceID *uuid.UUID `json:"recurrenceId,omitempty"`
// RecurrenceName - READ-ONLY; The recurrence name, user friendly name for the correlation between jobs.
RecurrenceName *string `json:"recurrenceName,omitempty"`
// NumJobsFailed - READ-ONLY; The number of jobs in this recurrence that have failed.
NumJobsFailed *int32 `json:"numJobsFailed,omitempty"`
// NumJobsCanceled - READ-ONLY; The number of jobs in this recurrence that have been canceled.
NumJobsCanceled *int32 `json:"numJobsCanceled,omitempty"`
// NumJobsSucceeded - READ-ONLY; The number of jobs in this recurrence that have succeeded.
NumJobsSucceeded *int32 `json:"numJobsSucceeded,omitempty"`
// AuHoursFailed - READ-ONLY; The number of job execution hours that resulted in failed jobs.
AuHoursFailed *float64 `json:"auHoursFailed,omitempty"`
// AuHoursCanceled - READ-ONLY; The number of job execution hours that resulted in canceled jobs.
AuHoursCanceled *float64 `json:"auHoursCanceled,omitempty"`
// AuHoursSucceeded - READ-ONLY; The number of job execution hours that resulted in successful jobs.
AuHoursSucceeded *float64 `json:"auHoursSucceeded,omitempty"`
// LastSubmitTime - READ-ONLY; The last time a job in this recurrence was submitted.
LastSubmitTime *date.Time `json:"lastSubmitTime,omitempty"`
}
// RecurrenceInformationListResult list of job recurrence information items.
type RecurrenceInformationListResult struct {
autorest.Response `json:"-"`
// Value - READ-ONLY; The list of job recurrence information items.
Value *[]RecurrenceInformation `json:"value,omitempty"`
// NextLink - READ-ONLY; The link (url) to the next page of results.
NextLink *string `json:"nextLink,omitempty"`
}
// RecurrenceInformationListResultIterator provides access to a complete listing of RecurrenceInformation
// values.
type RecurrenceInformationListResultIterator struct {
i int
page RecurrenceInformationListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *RecurrenceInformationListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RecurrenceInformationListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *RecurrenceInformationListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter RecurrenceInformationListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter RecurrenceInformationListResultIterator) Response() RecurrenceInformationListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter RecurrenceInformationListResultIterator) Value() RecurrenceInformation {
if !iter.page.NotDone() {
return RecurrenceInformation{}
}
return iter.page.Values()[iter.i]
}
// NewRecurrenceInformationListResultIterator creates a new instance of the RecurrenceInformationListResultIterator type.
func NewRecurrenceInformationListResultIterator(page RecurrenceInformationListResultPage) RecurrenceInformationListResultIterator {
return RecurrenceInformationListResultIterator{page: page}
}
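// collectRecurrences is a hypothetical helper (not part of the generated surface)
// showing the iterator-style alternative to page iteration: Value yields one
// RecurrenceInformation at a time and NextWithContext advances transparently across
// page boundaries.
func collectRecurrences(ctx context.Context, iter RecurrenceInformationListResultIterator) ([]RecurrenceInformation, error) {
	var all []RecurrenceInformation
	for iter.NotDone() {
		all = append(all, iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return all, nil
}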
// IsEmpty returns true if the ListResult contains no values.
func (rilr RecurrenceInformationListResult) IsEmpty() bool {
return rilr.Value == nil || len(*rilr.Value) == 0
}
// recurrenceInformationListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (rilr RecurrenceInformationListResult) recurrenceInformationListResultPreparer(ctx context.Context) (*http.Request, error) {
if rilr.NextLink == nil || len(to.String(rilr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(rilr.NextLink)))
}
// RecurrenceInformationListResultPage contains a page of RecurrenceInformation values.
type RecurrenceInformationListResultPage struct {
fn func(context.Context, RecurrenceInformationListResult) (RecurrenceInformationListResult, error)
rilr RecurrenceInformationListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *RecurrenceInformationListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RecurrenceInformationListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.rilr)
if err != nil {
return err
}
page.rilr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *RecurrenceInformationListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page RecurrenceInformationListResultPage) NotDone() bool {
return !page.rilr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page RecurrenceInformationListResultPage) Response() RecurrenceInformationListResult {
return page.rilr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page RecurrenceInformationListResultPage) Values() []RecurrenceInformation {
if page.rilr.IsEmpty() {
return nil
}
return *page.rilr.Value
}
// NewRecurrenceInformationListResultPage creates a new instance of the RecurrenceInformationListResultPage type.
func NewRecurrenceInformationListResultPage(getNextPage func(context.Context, RecurrenceInformationListResult) (RecurrenceInformationListResult, error)) RecurrenceInformationListResultPage {
return RecurrenceInformationListResultPage{fn: getNextPage}
}
// RelationshipProperties job relationship information properties including pipeline information,
// correlation information, etc.
type RelationshipProperties struct {
// PipelineID - The job relationship pipeline identifier (a GUID).
PipelineID *uuid.UUID `json:"pipelineId,omitempty"`
// PipelineName - The friendly name of the job relationship pipeline, which does not need to be unique.
PipelineName *string `json:"pipelineName,omitempty"`
// PipelineURI - The pipeline uri, unique, links to the originating service for this pipeline.
PipelineURI *string `json:"pipelineUri,omitempty"`
// RunID - The run identifier (a GUID), unique identifier of the iteration of this pipeline.
RunID *uuid.UUID `json:"runId,omitempty"`
// RecurrenceID - The recurrence identifier (a GUID), unique per activity/script, regardless of iterations. This is used to link different occurrences of the same job together.
RecurrenceID *uuid.UUID `json:"recurrenceId,omitempty"`
// RecurrenceName - The recurrence name, user friendly name for the correlation between jobs.
RecurrenceName *string `json:"recurrenceName,omitempty"`
}
// Resource the Data Lake Analytics job resources.
type Resource struct {
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// ResourcePath - The path to the resource.
ResourcePath *string `json:"resourcePath,omitempty"`
// Type - The job resource type. Possible values include: 'VertexResource', 'JobManagerResource', 'StatisticsResource', 'VertexResourceInUserFolder', 'JobManagerResourceInUserFolder', 'StatisticsResourceInUserFolder'
Type ResourceType `json:"type,omitempty"`
}
// ResourceUsageStatistics the statistics information for resource usage.
type ResourceUsageStatistics struct {
// Average - READ-ONLY; The average value.
Average *float64 `json:"average,omitempty"`
// Minimum - READ-ONLY; The minimum value.
Minimum *int64 `json:"minimum,omitempty"`
// Maximum - READ-ONLY; The maximum value.
Maximum *int64 `json:"maximum,omitempty"`
}
// ScopeJobProperties Scope job properties used when submitting and retrieving Scope jobs. (Only for use
// internally with Scope job type.)
type ScopeJobProperties struct {
// Resources - READ-ONLY; The list of resources that are required by the job.
Resources *[]ScopeJobResource `json:"resources,omitempty"`
// UserAlgebraPath - READ-ONLY; The algebra file path after the job has completed.
UserAlgebraPath *string `json:"userAlgebraPath,omitempty"`
// Notifier - The list of email addresses, separated by semi-colons, to notify when the job reaches a terminal state.
Notifier *string `json:"notifier,omitempty"`
// TotalCompilationTime - READ-ONLY; The total time this job spent compiling. This value should not be set by the user and will be ignored if it is.
TotalCompilationTime *string `json:"totalCompilationTime,omitempty"`
// TotalQueuedTime - READ-ONLY; The total time this job spent queued. This value should not be set by the user and will be ignored if it is.
TotalQueuedTime *string `json:"totalQueuedTime,omitempty"`
// TotalRunningTime - READ-ONLY; The total time this job spent executing. This value should not be set by the user and will be ignored if it is.
TotalRunningTime *string `json:"totalRunningTime,omitempty"`
// TotalPausedTime - READ-ONLY; The total time this job spent paused. This value should not be set by the user and will be ignored if it is.
TotalPausedTime *string `json:"totalPausedTime,omitempty"`
// RootProcessNodeID - READ-ONLY; The ID used to identify the job manager coordinating job execution. This value should not be set by the user and will be ignored if it is.
RootProcessNodeID *string `json:"rootProcessNodeId,omitempty"`
// YarnApplicationID - READ-ONLY; The ID used to identify the yarn application executing the job. This value should not be set by the user and will be ignored if it is.
YarnApplicationID *string `json:"yarnApplicationId,omitempty"`
// RuntimeVersion - The runtime version of the Data Lake Analytics engine to use for the specific type of job being run.
RuntimeVersion *string `json:"runtimeVersion,omitempty"`
// Script - The script to run. Please note that the maximum script size is 3 MB.
Script *string `json:"script,omitempty"`
// Type - Possible values include: 'TypeJobProperties', 'TypeUSQL', 'TypeHive', 'TypeScope'
Type Type `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for ScopeJobProperties.
func (sjp ScopeJobProperties) MarshalJSON() ([]byte, error) {
sjp.Type = TypeScope
objectMap := make(map[string]interface{})
if sjp.Notifier != nil {
objectMap["notifier"] = sjp.Notifier
}
if sjp.RuntimeVersion != nil {
objectMap["runtimeVersion"] = sjp.RuntimeVersion
}
if sjp.Script != nil {
objectMap["script"] = sjp.Script
}
if sjp.Type != "" {
objectMap["type"] = sjp.Type
}
return json.Marshal(objectMap)
}
// AsUSQLJobProperties is the BasicProperties implementation for ScopeJobProperties.
func (sjp ScopeJobProperties) AsUSQLJobProperties() (*USQLJobProperties, bool) {
return nil, false
}
// AsHiveJobProperties is the BasicProperties implementation for ScopeJobProperties.
func (sjp ScopeJobProperties) AsHiveJobProperties() (*HiveJobProperties, bool) {
return nil, false
}
// AsScopeJobProperties is the BasicProperties implementation for ScopeJobProperties.
func (sjp ScopeJobProperties) AsScopeJobProperties() (*ScopeJobProperties, bool) {
return &sjp, true
}
// AsProperties is the BasicProperties implementation for ScopeJobProperties.
func (sjp ScopeJobProperties) AsProperties() (*Properties, bool) {
return nil, false
}
// AsBasicProperties is the BasicProperties implementation for ScopeJobProperties.
func (sjp ScopeJobProperties) AsBasicProperties() (BasicProperties, bool) {
return &sjp, true
}
// ScopeJobResource the Scope job resources. (Only for use internally with Scope job type.)
type ScopeJobResource struct {
// Name - The name of the resource.
Name *string `json:"name,omitempty"`
// Path - The path to the resource.
Path *string `json:"path,omitempty"`
}
// StateAuditRecord the Data Lake Analytics job state audit records for tracking the lifecycle of a job.
type StateAuditRecord struct {
// NewState - READ-ONLY; The new state the job is in.
NewState *string `json:"newState,omitempty"`
// TimeStamp - READ-ONLY; The time stamp that the state change took place.
TimeStamp *date.Time `json:"timeStamp,omitempty"`
// RequestedByUser - READ-ONLY; The user who requests the change.
RequestedByUser *string `json:"requestedByUser,omitempty"`
// Details - READ-ONLY; The details of the audit log.
Details *string `json:"details,omitempty"`
}
// Statistics the Data Lake Analytics job execution statistics.
type Statistics struct {
autorest.Response `json:"-"`
// LastUpdateTimeUtc - READ-ONLY; The last update time for the statistics.
LastUpdateTimeUtc *date.Time `json:"lastUpdateTimeUtc,omitempty"`
// FinalizingTimeUtc - READ-ONLY; The job finalizing start time.
FinalizingTimeUtc *date.Time `json:"finalizingTimeUtc,omitempty"`
// Stages - READ-ONLY; The list of stages for the job.
Stages *[]StatisticsVertexStage `json:"stages,omitempty"`
}
// StatisticsVertex the detailed information for a vertex.
type StatisticsVertex struct {
// Name - READ-ONLY; The name of the vertex.
Name *string `json:"name,omitempty"`
// VertexID - READ-ONLY; The ID of the vertex.
VertexID *uuid.UUID `json:"vertexId,omitempty"`
// ExecutionTime - READ-ONLY; The execution time of the vertex.
ExecutionTime *string `json:"executionTime,omitempty"`
// DataRead - READ-ONLY; The amount of data read by the vertex, in bytes.
DataRead *int64 `json:"dataRead,omitempty"`
// PeakMemUsage - READ-ONLY; The peak memory usage of the vertex, in bytes.
PeakMemUsage *int64 `json:"peakMemUsage,omitempty"`
}
// StatisticsVertexStage the Data Lake Analytics job statistics vertex stage information.
type StatisticsVertexStage struct {
// DataRead - READ-ONLY; The amount of data read, in bytes.
DataRead *int64 `json:"dataRead,omitempty"`
// DataReadCrossPod - READ-ONLY; The amount of data read across multiple pods, in bytes.
DataReadCrossPod *int64 `json:"dataReadCrossPod,omitempty"`
// DataReadIntraPod - READ-ONLY; The amount of data read in one pod, in bytes.
DataReadIntraPod *int64 `json:"dataReadIntraPod,omitempty"`
// DataToRead - READ-ONLY; The amount of data remaining to be read, in bytes.
DataToRead *int64 `json:"dataToRead,omitempty"`
// DataWritten - READ-ONLY; The amount of data written, in bytes.
DataWritten *int64 `json:"dataWritten,omitempty"`
// DuplicateDiscardCount - READ-ONLY; The number of duplicates that were discarded.
DuplicateDiscardCount *int32 `json:"duplicateDiscardCount,omitempty"`
// FailedCount - READ-ONLY; The number of failures that occurred in this stage.
FailedCount *int32 `json:"failedCount,omitempty"`
// MaxVertexDataRead - READ-ONLY; The maximum amount of data read in a single vertex, in bytes.
MaxVertexDataRead *int64 `json:"maxVertexDataRead,omitempty"`
// MinVertexDataRead - READ-ONLY; The minimum amount of data read in a single vertex, in bytes.
MinVertexDataRead *int64 `json:"minVertexDataRead,omitempty"`
// ReadFailureCount - READ-ONLY; The number of read failures in this stage.
ReadFailureCount *int32 `json:"readFailureCount,omitempty"`
// RevocationCount - READ-ONLY; The number of vertices that were revoked during this stage.
RevocationCount *int32 `json:"revocationCount,omitempty"`
// RunningCount - READ-ONLY; The number of currently running vertices in this stage.
RunningCount *int32 `json:"runningCount,omitempty"`
// ScheduledCount - READ-ONLY; The number of currently scheduled vertices in this stage.
ScheduledCount *int32 `json:"scheduledCount,omitempty"`
// StageName - READ-ONLY; The name of this stage in job execution.
StageName *string `json:"stageName,omitempty"`
// SucceededCount - READ-ONLY; The number of vertices that succeeded in this stage.
SucceededCount *int32 `json:"succeededCount,omitempty"`
// TempDataWritten - READ-ONLY; The amount of temporary data written, in bytes.
TempDataWritten *int64 `json:"tempDataWritten,omitempty"`
// TotalCount - READ-ONLY; The total vertex count for this stage.
TotalCount *int32 `json:"totalCount,omitempty"`
// TotalFailedTime - READ-ONLY; The amount of time that failed vertices took up in this stage.
TotalFailedTime *string `json:"totalFailedTime,omitempty"`
// TotalProgress - READ-ONLY; The current progress of this stage, as a percentage.
TotalProgress *int32 `json:"totalProgress,omitempty"`
// TotalSucceededTime - READ-ONLY; The amount of time all successful vertices took in this stage.
TotalSucceededTime *string `json:"totalSucceededTime,omitempty"`
// TotalPeakMemUsage - READ-ONLY; The sum of the peak memory usage of all the vertices in the stage, in bytes.
TotalPeakMemUsage *int64 `json:"totalPeakMemUsage,omitempty"`
// TotalExecutionTime - READ-ONLY; The sum of the total execution time of all the vertices in the stage.
TotalExecutionTime *string `json:"totalExecutionTime,omitempty"`
// MaxDataReadVertex - The vertex with the maximum amount of data read.
MaxDataReadVertex *StatisticsVertex `json:"maxDataReadVertex,omitempty"`
// MaxExecutionTimeVertex - The vertex with the maximum execution time.
MaxExecutionTimeVertex *StatisticsVertex `json:"maxExecutionTimeVertex,omitempty"`
// MaxPeakMemUsageVertex - The vertex with the maximum peak memory usage.
MaxPeakMemUsageVertex *StatisticsVertex `json:"maxPeakMemUsageVertex,omitempty"`
// EstimatedVertexCPUCoreCount - READ-ONLY; The estimated vertex CPU core count.
EstimatedVertexCPUCoreCount *int32 `json:"estimatedVertexCpuCoreCount,omitempty"`
// EstimatedVertexPeakCPUCoreCount - READ-ONLY; The estimated vertex peak CPU core count.
EstimatedVertexPeakCPUCoreCount *int32 `json:"estimatedVertexPeakCpuCoreCount,omitempty"`
// EstimatedVertexMemSize - READ-ONLY; The estimated vertex memory size, in bytes.
EstimatedVertexMemSize *int64 `json:"estimatedVertexMemSize,omitempty"`
// AllocatedContainerCPUCoreCount - The statistics information for the allocated container CPU core count.
AllocatedContainerCPUCoreCount *ResourceUsageStatistics `json:"allocatedContainerCpuCoreCount,omitempty"`
// AllocatedContainerMemSize - The statistics information for the allocated container memory size.
AllocatedContainerMemSize *ResourceUsageStatistics `json:"allocatedContainerMemSize,omitempty"`
// UsedVertexCPUCoreCount - The statistics information for the used vertex CPU core count.
UsedVertexCPUCoreCount *ResourceUsageStatistics `json:"usedVertexCpuCoreCount,omitempty"`
// UsedVertexPeakMemSize - The statistics information for the used vertex peak memory size.
UsedVertexPeakMemSize *ResourceUsageStatistics `json:"usedVertexPeakMemSize,omitempty"`
}
// UpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type UpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *UpdateFuture) Result(client Client) (i Information, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "job.UpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("job.UpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent {
i, err = client.UpdateResponder(i.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "job.UpdateFuture", "Result", i.Response.Response, "Failure responding to request")
}
}
return
}
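// waitForUpdate is a hypothetical sketch (not generated code) of blocking on an
// UpdateFuture until the job update completes. It assumes, as in other generated
// packages, that Client embeds autorest.Client so it can be passed to
// WaitForCompletionRef; adjust to the real client shape if that assumption does not hold.
func waitForUpdate(ctx context.Context, client Client, future *UpdateFuture) (Information, error) {
	// Poll the service until the long-running operation reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return Information{}, err
	}
	// Retrieve the final job Information from the completed operation.
	return future.Result(client)
}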
// UpdateJobParameters the parameters that can be used to update existing Data Lake Analytics job
// information properties. (Only for use internally with Scope job type.)
type UpdateJobParameters struct {
// DegreeOfParallelism - The degree of parallelism used for this job.
DegreeOfParallelism *int32 `json:"degreeOfParallelism,omitempty"`
// DegreeOfParallelismPercent - The degree of parallelism, expressed as a percentage, used for this job.
DegreeOfParallelismPercent *float64 `json:"degreeOfParallelismPercent,omitempty"`
// Priority - The priority value for the current job. Lower numbers have a higher priority. By default, a job has a priority of 1000. This must be greater than 0.
Priority *int32 `json:"priority,omitempty"`
// Tags - The key-value pairs used to add additional metadata to the job information.
Tags map[string]*string `json:"tags"`
}
// MarshalJSON is the custom marshaler for UpdateJobParameters.
func (ujp UpdateJobParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if ujp.DegreeOfParallelism != nil {
objectMap["degreeOfParallelism"] = ujp.DegreeOfParallelism
}
if ujp.DegreeOfParallelismPercent != nil {
objectMap["degreeOfParallelismPercent"] = ujp.DegreeOfParallelismPercent
}
if ujp.Priority != nil {
objectMap["priority"] = ujp.Priority
}
if ujp.Tags != nil {
objectMap["tags"] = ujp.Tags
}
return json.Marshal(objectMap)
}
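// newPriorityUpdate is a hypothetical convenience constructor (not part of the generated
// surface) illustrating how UpdateJobParameters might be populated; only non-nil fields
// are emitted by the MarshalJSON above, so properties left nil remain unchanged on the
// service side.
func newPriorityUpdate(priority int32, tags map[string]*string) UpdateJobParameters {
	return UpdateJobParameters{
		// Priority must be greater than 0; lower numbers run with higher priority.
		Priority: to.Int32Ptr(priority),
		Tags:     tags,
	}
}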
// USQLJobProperties U-SQL job properties used when retrieving U-SQL jobs.
type USQLJobProperties struct {
// Resources - READ-ONLY; The list of resources that are required by the job.
Resources *[]Resource `json:"resources,omitempty"`
// Statistics - The job specific statistics.
Statistics *Statistics `json:"statistics,omitempty"`
// DebugData - The job specific debug data locations.
DebugData *DataPath `json:"debugData,omitempty"`
// Diagnostics - READ-ONLY; The diagnostics for the job.
Diagnostics *[]Diagnostics `json:"diagnostics,omitempty"`
// AlgebraFilePath - READ-ONLY; The algebra file path after the job has completed.
AlgebraFilePath *string `json:"algebraFilePath,omitempty"`
// TotalCompilationTime - READ-ONLY; The total time this job spent compiling. This value should not be set by the user and will be ignored if it is.
TotalCompilationTime *string `json:"totalCompilationTime,omitempty"`
// TotalQueuedTime - READ-ONLY; The total time this job spent queued. This value should not be set by the user and will be ignored if it is.
TotalQueuedTime *string `json:"totalQueuedTime,omitempty"`
// TotalRunningTime - READ-ONLY; The total time this job spent executing. This value should not be set by the user and will be ignored if it is.
TotalRunningTime *string `json:"totalRunningTime,omitempty"`
// TotalPausedTime - READ-ONLY; The total time this job spent paused. This value should not be set by the user and will be ignored if it is.
TotalPausedTime *string `json:"totalPausedTime,omitempty"`
// RootProcessNodeID - READ-ONLY; The ID used to identify the job manager coordinating job execution. This value should not be set by the user and will be ignored if it is.
RootProcessNodeID *string `json:"rootProcessNodeId,omitempty"`
// YarnApplicationID - READ-ONLY; The ID used to identify the yarn application executing the job. This value should not be set by the user and will be ignored if it is.
YarnApplicationID *string `json:"yarnApplicationId,omitempty"`
// YarnApplicationTimeStamp - READ-ONLY; The timestamp (in ticks) for the yarn application executing the job. This value should not be set by the user and will be ignored if it is.
YarnApplicationTimeStamp *int64 `json:"yarnApplicationTimeStamp,omitempty"`
// CompileMode - READ-ONLY; The specific compilation mode for the job used during execution. If this is not specified during submission, the server will determine the optimal compilation mode. Possible values include: 'Semantic', 'Full', 'SingleBox'
CompileMode CompileMode `json:"compileMode,omitempty"`
// RuntimeVersion - The runtime version of the Data Lake Analytics engine to use for the specific type of job being run.
RuntimeVersion *string `json:"runtimeVersion,omitempty"`
// Script - The script to run. Please note that the maximum script size is 3 MB.
Script *string `json:"script,omitempty"`
// Type - Possible values include: 'TypeJobProperties', 'TypeUSQL', 'TypeHive', 'TypeScope'
Type Type `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for USQLJobProperties.
func (usjp USQLJobProperties) MarshalJSON() ([]byte, error) {
usjp.Type = TypeUSQL
objectMap := make(map[string]interface{})
if usjp.Statistics != nil {
objectMap["statistics"] = usjp.Statistics
}
if usjp.DebugData != nil {
objectMap["debugData"] = usjp.DebugData
}
if usjp.RuntimeVersion != nil {
objectMap["runtimeVersion"] = usjp.RuntimeVersion
}
if usjp.Script != nil {
objectMap["script"] = usjp.Script
}
if usjp.Type != "" {
objectMap["type"] = usjp.Type
}
return json.Marshal(objectMap)
}
// AsUSQLJobProperties is the BasicProperties implementation for USQLJobProperties.
func (usjp USQLJobProperties) AsUSQLJobProperties() (*USQLJobProperties, bool) {
return &usjp, true
}
// AsHiveJobProperties is the BasicProperties implementation for USQLJobProperties.
func (usjp USQLJobProperties) AsHiveJobProperties() (*HiveJobProperties, bool) {
return nil, false
}
// AsScopeJobProperties is the BasicProperties implementation for USQLJobProperties.
func (usjp USQLJobProperties) AsScopeJobProperties() (*ScopeJobProperties, bool) {
return nil, false
}
// AsProperties is the BasicProperties implementation for USQLJobProperties.
func (usjp USQLJobProperties) AsProperties() (*Properties, bool) {
return nil, false
}
// AsBasicProperties is the BasicProperties implementation for USQLJobProperties.
func (usjp USQLJobProperties) AsBasicProperties() (BasicProperties, bool) {
return &usjp, true
}
// YieldFuture an abstraction for monitoring and retrieving the results of a long-running operation.
type YieldFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *YieldFuture) Result(client Client) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "job.YieldFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("job.YieldFuture")
return
}
ar.Response = future.Response()
return
}