diff --git a/apis/v1alpha1/endpoint.go b/apis/v1alpha1/endpoint.go index 231c6d7c..04498a5b 100644 --- a/apis/v1alpha1/endpoint.go +++ b/apis/v1alpha1/endpoint.go @@ -22,11 +22,14 @@ import ( // EndpointSpec defines the desired state of Endpoint type EndpointSpec struct { + // The name of an endpoint configuration. For more information, see CreateEndpointConfig. // +kubebuilder:validation:Required EndpointConfigName *string `json:"endpointConfigName"` + // The name of the endpoint.The name must be unique within an AWS Region in + // your AWS account. The name is case-insensitive in CreateEndpoint, but the + // case is preserved and must be matched in . // +kubebuilder:validation:Required EndpointName *string `json:"endpointName"` - Tags []*Tag `json:"tags,omitempty"` } // EndpointStatus defines the observed state of Endpoint @@ -39,17 +42,56 @@ type EndpointStatus struct { // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource - Conditions []*ackv1alpha1.Condition `json:"conditions"` - EndpointConfigName *string `json:"endpointConfigName,omitempty"` - EndpointStatus *string `json:"endpointStatus,omitempty"` - FailureReason *string `json:"failureReason,omitempty"` + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // A timestamp that shows when the endpoint was created. + CreationTime *metav1.Time `json:"creationTime,omitempty"` + // The status of the endpoint. + // + // * OutOfService: Endpoint is not available to take incoming requests. + // + // * Creating: CreateEndpoint is executing. + // + // * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. + // + // * SystemUpdating: Endpoint is undergoing maintenance and cannot be updated + // or deleted or re-scaled until it has completed. This maintenance operation + // does not change any customer-specified values such as VPC config, KMS + // encryption, model, instance type, or instance count. + // + // * RollingBack: Endpoint fails to scale up or down or change its variant + // weight and is in the process of rolling back to its previous configuration. + // Once the rollback completes, endpoint returns to an InService status. + // This transitional status only applies to an endpoint that has autoscaling + // enabled and is undergoing variant weight or capacity changes as part of + // an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities + // operation is called explicitly. + // + // * InService: Endpoint is available to process incoming requests. + // + // * Deleting: DeleteEndpoint is executing. + // + // * Failed: Endpoint could not be created, updated, or re-scaled. Use DescribeEndpointOutput$FailureReason + // for information about the failure. DeleteEndpoint is the only operation + // that can be performed on a failed endpoint. + EndpointStatus *string `json:"endpointStatus,omitempty"` + // If the status of the endpoint is Failed, the reason why it failed. + FailureReason *string `json:"failureReason,omitempty"` + // Name of the Amazon SageMaker endpoint configuration. + LastEndpointConfigNameForUpdate *string `json:"lastEndpointConfigNameForUpdate,omitempty"` + // A timestamp that shows when the endpoint was last modified. + LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` + // The name of the endpoint configuration associated with this endpoint. 
+ LatestEndpointConfigName *string `json:"latestEndpointConfigName,omitempty"` + // An array of ProductionVariantSummary objects, one for each model hosted behind + // this endpoint. + ProductionVariants []*ProductionVariantSummary `json:"productionVariants,omitempty"` } // Endpoint is the Schema for the Endpoints API // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="EndpointStatus",type=string,JSONPath=`.status.endpointStatus` // +kubebuilder:printcolumn:name="FailureReason",type=string,JSONPath=`.status.failureReason` +// +kubebuilder:printcolumn:name="EndpointStatus",type=string,JSONPath=`.status.endpointStatus` type Endpoint struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/apis/v1alpha1/endpoint_config.go b/apis/v1alpha1/endpoint_config.go index 8607b3e0..b58b6461 100644 --- a/apis/v1alpha1/endpoint_config.go +++ b/apis/v1alpha1/endpoint_config.go @@ -23,12 +23,47 @@ import ( // EndpointConfigSpec defines the desired state of EndpointConfig type EndpointConfigSpec struct { DataCaptureConfig *DataCaptureConfig `json:"dataCaptureConfig,omitempty"` + // The name of the endpoint configuration. You specify this name in a CreateEndpoint + // request. // +kubebuilder:validation:Required EndpointConfigName *string `json:"endpointConfigName"` - KMSKeyID *string `json:"kmsKeyID,omitempty"` + // The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon + // SageMaker uses to encrypt data on the storage volume attached to the ML compute + // instance that hosts the endpoint. + // + // The KmsKeyId can be any of the following formats: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Alias name: alias/ExampleAlias + // + // * Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + // + // The KMS key policy must grant permission to the IAM role that you specify + // in your CreateEndpoint, UpdateEndpoint requests. For more information, refer + // to the AWS Key Management Service section Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // + // Certain Nitro-based instances include local storage, dependent on the instance + // type. Local storage volumes are encrypted using a hardware module on the + // instance. You can't request a KmsKeyId when using an instance type with local + // storage. If any of the models that you specify in the ProductionVariants + // parameter use nitro-based instances with local storage, do not specify a + // value for the KmsKeyId parameter. If you specify a value for KmsKeyId when + // using any nitro-based instances with local storage, the call to CreateEndpointConfig + // fails. + // + // For a list of instance types that support local instance storage, see Instance + // Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). + // + // For more information about local instance storage encryption, see SSD Instance + // Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html). + KMSKeyID *string `json:"kmsKeyID,omitempty"` + // An list of ProductionVariant objects, one for each model that you want to + // host at this endpoint. 
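For orientation, a minimal Endpoint manifest under the updated schema might look like the sketch below. It assumes the sagemaker.services.k8s.aws/v1alpha1 group/version implied by the CRD files in this diff; the resource and configuration names are hypothetical placeholders, and spec.tags is intentionally absent because the Tags field is removed from EndpointSpec in this change.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: Endpoint
metadata:
  name: example-endpoint                        # hypothetical name
spec:
  endpointName: example-endpoint                # required; must be unique within an AWS Region in the account
  endpointConfigName: example-endpoint-config   # required; references an existing EndpointConfig
  # spec.tags is no longer part of the schema after this change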
// +kubebuilder:validation:Required ProductionVariants []*ProductionVariant `json:"productionVariants"` - Tags []*Tag `json:"tags,omitempty"` } // EndpointConfigStatus defines the observed state of EndpointConfig diff --git a/apis/v1alpha1/hyper_parameter_tuning_job.go b/apis/v1alpha1/hyper_parameter_tuning_job.go index f4b8c503..ffb76b4c 100644 --- a/apis/v1alpha1/hyper_parameter_tuning_job.go +++ b/apis/v1alpha1/hyper_parameter_tuning_job.go @@ -22,14 +22,44 @@ import ( // HyperParameterTuningJobSpec defines the desired state of HyperParameterTuningJob type HyperParameterTuningJobSpec struct { + // The HyperParameterTuningJobConfig object that describes the tuning job, including + // the search strategy, the objective metric used to evaluate training jobs, + // ranges of parameters to search, and resource limits for the tuning job. For + // more information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). // +kubebuilder:validation:Required HyperParameterTuningJobConfig *HyperParameterTuningJobConfig `json:"hyperParameterTuningJobConfig"` + // The name of the tuning job. This name is the prefix for the names of all + // training jobs that this tuning job launches. The name must be unique within + // the same AWS account and AWS Region. The name must have 1 to 32 characters. + // Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name + // is not case sensitive. // +kubebuilder:validation:Required - HyperParameterTuningJobName *string `json:"hyperParameterTuningJobName"` - Tags []*Tag `json:"tags,omitempty"` - TrainingJobDefinition *HyperParameterTrainingJobDefinition `json:"trainingJobDefinition,omitempty"` - TrainingJobDefinitions []*HyperParameterTrainingJobDefinition `json:"trainingJobDefinitions,omitempty"` - WarmStartConfig *HyperParameterTuningJobWarmStartConfig `json:"warmStartConfig,omitempty"` + HyperParameterTuningJobName *string `json:"hyperParameterTuningJobName"` + // The HyperParameterTrainingJobDefinition object that describes the training + // jobs that this tuning job launches, including static hyperparameters, input + // data configuration, output data configuration, resource configuration, and + // stopping condition. + TrainingJobDefinition *HyperParameterTrainingJobDefinition `json:"trainingJobDefinition,omitempty"` + // A list of the HyperParameterTrainingJobDefinition objects launched for this + // tuning job. + TrainingJobDefinitions []*HyperParameterTrainingJobDefinition `json:"trainingJobDefinitions,omitempty"` + // Specifies the configuration for starting the hyperparameter tuning job using + // one or more previous tuning jobs as a starting point. The results of previous + // tuning jobs are used to inform which combinations of hyperparameters to search + // over in the new tuning job. + // + // All training jobs launched by the new hyperparameter tuning job are evaluated + // by using the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM + // as the WarmStartType value for the warm start configuration, the training + // job that performs the best in the new tuning job is compared to the best + // training jobs from the parent tuning jobs. From these, the training job that + // performs the best as measured by the objective metric is returned as the + // overall best training job. 
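As a companion to the Endpoint sketch above, a hedged EndpointConfig example follows. Per this diff, endpointConfigName and productionVariants are required and kmsKeyID is optional; the ProductionVariant sub-fields shown (variantName, modelName, instanceType, initialInstanceCount, initialVariantWeight) follow the familiar SageMaker shape and should be read as assumptions, since the excerpt does not list them in full.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: EndpointConfig
metadata:
  name: example-endpoint-config                  # hypothetical name
spec:
  endpointConfigName: example-endpoint-config    # required; referenced by CreateEndpoint
  productionVariants:                            # required; one entry per model to host
    - variantName: AllTraffic                    # sub-field names assumed from the ProductionVariant shape
      modelName: example-model
      instanceType: ml.m5.large
      initialInstanceCount: 1
      initialVariantWeight: 1.0
  # kmsKeyID is optional and accepts any of the key formats documented above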
+ // + // All training jobs launched by parent hyperparameter tuning jobs and the new + // hyperparameter tuning jobs count against the limit of training jobs for the + // tuning job. + WarmStartConfig *HyperParameterTuningJobWarmStartConfig `json:"warmStartConfig,omitempty"` } // HyperParameterTuningJobStatus defines the observed state of HyperParameterTuningJob @@ -42,11 +72,20 @@ type HyperParameterTuningJobStatus struct { // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource - Conditions []*ackv1alpha1.Condition `json:"conditions"` - BestTrainingJob *HyperParameterTrainingJobSummary `json:"bestTrainingJob,omitempty"` - FailureReason *string `json:"failureReason,omitempty"` - HyperParameterTuningJobStatus *string `json:"hyperParameterTuningJobStatus,omitempty"` - OverallBestTrainingJob *HyperParameterTrainingJobSummary `json:"overallBestTrainingJob,omitempty"` + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // A TrainingJobSummary object that describes the training job that completed + // with the best current HyperParameterTuningJobObjective. + BestTrainingJob *HyperParameterTrainingJobSummary `json:"bestTrainingJob,omitempty"` + // If the tuning job failed, the reason it failed. + FailureReason *string `json:"failureReason,omitempty"` + // The status of the tuning job: InProgress, Completed, Failed, Stopping, or + // Stopped. + HyperParameterTuningJobStatus *string `json:"hyperParameterTuningJobStatus,omitempty"` + // If the hyperparameter tuning job is an warm start tuning job with a WarmStartType + // of IDENTICAL_DATA_AND_ALGORITHM, this is the TrainingJobSummary for the training + // job with the best objective metric value of all training jobs launched by + // this tuning job and all parent jobs specified for the warm start tuning job. + OverallBestTrainingJob *HyperParameterTrainingJobSummary `json:"overallBestTrainingJob,omitempty"` } // HyperParameterTuningJob is the Schema for the HyperParameterTuningJobs API diff --git a/apis/v1alpha1/model.go b/apis/v1alpha1/model.go index 6446ece3..25d4558e 100644 --- a/apis/v1alpha1/model.go +++ b/apis/v1alpha1/model.go @@ -22,16 +22,37 @@ import ( // ModelSpec defines the desired state of Model type ModelSpec struct { - Containers []*ContainerDefinition `json:"containers,omitempty"` - EnableNetworkIsolation *bool `json:"enableNetworkIsolation,omitempty"` + // Specifies the containers in the inference pipeline. + Containers []*ContainerDefinition `json:"containers,omitempty"` + // Isolates the model container. No inbound or outbound network calls can be + // made to or from the model container. + EnableNetworkIsolation *bool `json:"enableNetworkIsolation,omitempty"` + // The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker can + // assume to access model artifacts and docker image for deployment on ML compute + // instances or for batch transform jobs. Deploying on ML compute instances + // is part of model hosting. For more information, see Amazon SageMaker Roles + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + // + // To be able to pass this role to Amazon SageMaker, the caller of this API + // must have the iam:PassRole permission. // +kubebuilder:validation:Required - ExecutionRoleARN *string `json:"executionRoleARN"` + ExecutionRoleARN *string `json:"executionRoleARN"` + // Specifies details of how containers in a multi-container endpoint are called. 
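The HyperParameterTuningJob spec documented above is the most deeply nested resource in this diff, so a rough manifest sketch may help. Only hyperParameterTuningJobName and hyperParameterTuningJobConfig are required by the schema; the nested field names used here (strategy, hyperParameterTuningJobObjective, resourceLimits) are assumptions based on the usual SageMaker tuning-job shape rather than fields spelled out in this excerpt, and all values are placeholders.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: HyperParameterTuningJob
metadata:
  name: example-tuning-job                            # hypothetical name
spec:
  hyperParameterTuningJobName: example-tuning-job     # required; 1-32 chars, prefixes launched training job names
  hyperParameterTuningJobConfig:                      # required; sub-field names assumed
    strategy: Bayesian
    hyperParameterTuningJobObjective:
      type: Minimize
      metricName: "validation:error"
    resourceLimits:
      maxNumberOfTrainingJobs: 10
      maxParallelTrainingJobs: 2
  # trainingJobDefinition, trainingJobDefinitions, and warmStartConfig are optional and omitted here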
InferenceExecutionConfig *InferenceExecutionConfig `json:"inferenceExecutionConfig,omitempty"` + // The name of the new model. // +kubebuilder:validation:Required - ModelName *string `json:"modelName"` + ModelName *string `json:"modelName"` + // The location of the primary docker image containing inference code, associated + // artifacts, and custom environment map that the inference code uses when the + // model is deployed for predictions. PrimaryContainer *ContainerDefinition `json:"primaryContainer,omitempty"` - Tags []*Tag `json:"tags,omitempty"` - VPCConfig *VPCConfig `json:"vpcConfig,omitempty"` + // A VpcConfig object that specifies the VPC that you want your model to connect + // to. Control access to and from your model container by configuring the VPC. + // VpcConfig is used in hosting services and in batch transform. For more information, + // see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + // and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private + // Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). + VPCConfig *VPCConfig `json:"vpcConfig,omitempty"` } // ModelStatus defines the observed state of Model diff --git a/apis/v1alpha1/processing_job.go b/apis/v1alpha1/processing_job.go index 5eb01243..fc44a742 100644 --- a/apis/v1alpha1/processing_job.go +++ b/apis/v1alpha1/processing_job.go @@ -22,21 +22,37 @@ import ( // ProcessingJobSpec defines the desired state of ProcessingJob type ProcessingJobSpec struct { + // Configures the processing job to run a specified Docker container image. // +kubebuilder:validation:Required - AppSpecification *AppSpecification `json:"appSpecification"` - Environment map[string]*string `json:"environment,omitempty"` - ExperimentConfig *ExperimentConfig `json:"experimentConfig,omitempty"` - NetworkConfig *NetworkConfig `json:"networkConfig,omitempty"` + AppSpecification *AppSpecification `json:"appSpecification"` + // The environment variables to set in the Docker container. Up to 100 key and + // values entries in the map are supported. + Environment map[string]*string `json:"environment,omitempty"` + + ExperimentConfig *ExperimentConfig `json:"experimentConfig,omitempty"` + // Networking options for a processing job, such as whether to allow inbound + // and outbound network calls to and from processing containers, and the VPC + // subnets and security groups to use for VPC-enabled processing jobs. + NetworkConfig *NetworkConfig `json:"networkConfig,omitempty"` + // An array of inputs configuring the data to download into the processing container. ProcessingInputs []*ProcessingInput `json:"processingInputs,omitempty"` + // The name of the processing job. The name must be unique within an AWS Region + // in the AWS account. // +kubebuilder:validation:Required - ProcessingJobName *string `json:"processingJobName"` + ProcessingJobName *string `json:"processingJobName"` + // Output configuration for the processing job. ProcessingOutputConfig *ProcessingOutputConfig `json:"processingOutputConfig,omitempty"` + // Identifies the resources, ML compute instances, and ML storage volumes to + // deploy for a processing job. In distributed training, you specify more than + // one instance. // +kubebuilder:validation:Required ProcessingResources *ProcessingResources `json:"processingResources"` + // The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + // to perform tasks on your behalf. 
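A minimal Model manifest consistent with the ModelSpec fields above could look like the following. modelName and executionRoleARN are the required fields; the primaryContainer sub-fields (image, modelDataURL) are taken from the usual ContainerDefinition shape and are assumptions here, and the account ID, image, bucket, and role names are placeholders.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: Model
metadata:
  name: example-model                                  # hypothetical name
spec:
  modelName: example-model                             # required
  executionRoleARN: arn:aws:iam::111122223333:role/example-sagemaker-role   # required; caller needs iam:PassRole
  primaryContainer:                                    # sub-field names assumed; use containers for an inference pipeline
    image: 111122223333.dkr.ecr.us-west-2.amazonaws.com/example-inference:latest
    modelDataURL: s3://example-bucket/model/model.tar.gz
  # enableNetworkIsolation, inferenceExecutionConfig, and vpcConfig remain optional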
// +kubebuilder:validation:Required - RoleARN *string `json:"roleARN"` + RoleARN *string `json:"roleARN"` + // The time limit for how long the processing job is allowed to run. StoppingCondition *ProcessingStoppingCondition `json:"stoppingCondition,omitempty"` - Tags []*Tag `json:"tags,omitempty"` } // ProcessingJobStatus defines the observed state of ProcessingJob @@ -49,9 +65,12 @@ type ProcessingJobStatus struct { // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource - Conditions []*ackv1alpha1.Condition `json:"conditions"` - FailureReason *string `json:"failureReason,omitempty"` - ProcessingJobStatus *string `json:"processingJobStatus,omitempty"` + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // A string, up to one KB in size, that contains the reason a processing job + // failed, if it failed. + FailureReason *string `json:"failureReason,omitempty"` + // Provides the status of a processing job. + ProcessingJobStatus *string `json:"processingJobStatus,omitempty"` } // ProcessingJob is the Schema for the ProcessingJobs API diff --git a/apis/v1alpha1/training_job.go b/apis/v1alpha1/training_job.go index 341f1818..6c78d1b1 100644 --- a/apis/v1alpha1/training_job.go +++ b/apis/v1alpha1/training_job.go @@ -22,32 +22,126 @@ import ( // TrainingJobSpec defines the desired state of TrainingJob type TrainingJobSpec struct { + // The registry path of the Docker image that contains the training algorithm + // and algorithm-specific metadata, including the input mode. For more information + // about algorithms provided by Amazon SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + // For information about providing your own algorithms, see Using Your Own Algorithms + // with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). // +kubebuilder:validation:Required - AlgorithmSpecification *AlgorithmSpecification `json:"algorithmSpecification"` - CheckpointConfig *CheckpointConfig `json:"checkpointConfig,omitempty"` - DebugHookConfig *DebugHookConfig `json:"debugHookConfig,omitempty"` - DebugRuleConfigurations []*DebugRuleConfiguration `json:"debugRuleConfigurations,omitempty"` - EnableInterContainerTrafficEncryption *bool `json:"enableInterContainerTrafficEncryption,omitempty"` - EnableManagedSpotTraining *bool `json:"enableManagedSpotTraining,omitempty"` - EnableNetworkIsolation *bool `json:"enableNetworkIsolation,omitempty"` - ExperimentConfig *ExperimentConfig `json:"experimentConfig,omitempty"` - HyperParameters map[string]*string `json:"hyperParameters,omitempty"` - InputDataConfig []*Channel `json:"inputDataConfig,omitempty"` + AlgorithmSpecification *AlgorithmSpecification `json:"algorithmSpecification"` + // Contains information about the output location for managed spot training + // checkpoint data. + CheckpointConfig *CheckpointConfig `json:"checkpointConfig,omitempty"` + + DebugHookConfig *DebugHookConfig `json:"debugHookConfig,omitempty"` + // Configuration information for Debugger rules for debugging output tensors. + DebugRuleConfigurations []*DebugRuleConfiguration `json:"debugRuleConfigurations,omitempty"` + // To encrypt all communications between ML compute instances in distributed + // training, choose True. Encryption provides greater security for distributed + // training, but training might take longer. 
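For the ProcessingJob resource above, the required fields are appSpecification, processingJobName, processingResources, and roleARN. The sketch below fills them in with placeholder values; the nested names (imageURI, clusterConfig, instanceCount, instanceType, volumeSizeInGB) mirror the usual SageMaker processing-job shape and should be treated as assumptions.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: ProcessingJob
metadata:
  name: example-processing-job                         # hypothetical name
spec:
  processingJobName: example-processing-job            # required; unique within an AWS Region in the account
  roleARN: arn:aws:iam::111122223333:role/example-sagemaker-role   # required
  appSpecification:                                    # required; sub-field names assumed
    imageURI: 111122223333.dkr.ecr.us-west-2.amazonaws.com/example-processor:latest
  processingResources:                                 # required; sub-field names assumed
    clusterConfig:
      instanceCount: 1
      instanceType: ml.m5.xlarge
      volumeSizeInGB: 30
  # processingInputs, processingOutputConfig, networkConfig, and stoppingCondition are optional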
How long it takes depends on the + // amount of communication between compute instances, especially if you use + // a deep learning algorithm in distributed training. For more information, + // see Protect Communications Between ML Compute Instances in a Distributed + // Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html). + EnableInterContainerTrafficEncryption *bool `json:"enableInterContainerTrafficEncryption,omitempty"` + // To train models using managed spot training, choose True. Managed spot training + // provides a fully managed and scalable infrastructure for training machine + // learning models. this option is useful when training jobs can be interrupted + // and when there is flexibility when the training job is run. + // + // The complete and intermediate results of jobs are stored in an Amazon S3 + // bucket, and can be used as a starting point to train models incrementally. + // Amazon SageMaker provides metrics and logs in CloudWatch. They can be used + // to see when managed spot training jobs are running, interrupted, resumed, + // or completed. + EnableManagedSpotTraining *bool `json:"enableManagedSpotTraining,omitempty"` + // Isolates the training container. No inbound or outbound network calls can + // be made, except for calls between peers within a training cluster for distributed + // training. If you enable network isolation for training jobs that are configured + // to use a VPC, Amazon SageMaker downloads and uploads customer data and model + // artifacts through the specified VPC, but the training container does not + // have network access. + EnableNetworkIsolation *bool `json:"enableNetworkIsolation,omitempty"` + + ExperimentConfig *ExperimentConfig `json:"experimentConfig,omitempty"` + // Algorithm-specific parameters that influence the quality of the model. You + // set hyperparameters before you start the learning process. For a list of + // hyperparameters for each training algorithm provided by Amazon SageMaker, + // see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + // + // You can specify a maximum of 100 hyperparameters. Each hyperparameter is + // a key-value pair. Each key and value is limited to 256 characters, as specified + // by the Length Constraint. + HyperParameters map[string]*string `json:"hyperParameters,omitempty"` + // An array of Channel objects. Each channel is a named input source. InputDataConfig + // describes the input data and its location. + // + // Algorithms can accept input data from one or more channels. For example, + // an algorithm might have two channels of input data, training_data and validation_data. + // The configuration for each channel provides the S3, EFS, or FSx location + // where the input data is stored. It also provides information about the stored + // data: the MIME type, compression method, and whether the data is wrapped + // in RecordIO format. + // + // Depending on the input mode that the algorithm supports, Amazon SageMaker + // either copies input data files from an S3 bucket to a local directory in + // the Docker container, or makes it available as input streams. For example, + // if you specify an EFS location, input data files will be made available as + // input streams. They do not need to be downloaded. + InputDataConfig []*Channel `json:"inputDataConfig,omitempty"` + // Specifies the path to the S3 location where you want to store model artifacts. + // Amazon SageMaker creates subfolders for the artifacts. 
// +kubebuilder:validation:Required - OutputDataConfig *OutputDataConfig `json:"outputDataConfig"` - ProfilerConfig *ProfilerConfig `json:"profilerConfig,omitempty"` + OutputDataConfig *OutputDataConfig `json:"outputDataConfig"` + + ProfilerConfig *ProfilerConfig `json:"profilerConfig,omitempty"` + // Configuration information for Debugger rules for profiling system and framework + // metrics. ProfilerRuleConfigurations []*ProfilerRuleConfiguration `json:"profilerRuleConfigurations,omitempty"` + // The resources, including the ML compute instances and ML storage volumes, + // to use for model training. + // + // ML storage volumes store model artifacts and incremental states. Training + // algorithms might also use ML storage volumes for scratch space. If you want + // Amazon SageMaker to use the ML storage volume to store the training data, + // choose File as the TrainingInputMode in the algorithm specification. For + // distributed training algorithms, specify an instance count greater than 1. // +kubebuilder:validation:Required ResourceConfig *ResourceConfig `json:"resourceConfig"` + // The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume + // to perform tasks on your behalf. + // + // During model training, Amazon SageMaker needs your permission to read input + // data from an S3 bucket, download a Docker image that contains training code, + // write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, + // and publish metrics to Amazon CloudWatch. You grant permissions for all of + // these tasks to an IAM role. For more information, see Amazon SageMaker Roles + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + // + // To be able to pass this role to Amazon SageMaker, the caller of this API + // must have the iam:PassRole permission. // +kubebuilder:validation:Required RoleARN *string `json:"roleARN"` + // Specifies a limit to how long a model training job can run. When the job + // reaches the time limit, Amazon SageMaker ends the training job. Use this + // API to cap model training costs. + // + // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which + // delays job termination for 120 seconds. Algorithms can use this 120-second + // window to save the model artifacts, so the results of training are not lost. // +kubebuilder:validation:Required - StoppingCondition *StoppingCondition `json:"stoppingCondition"` - Tags []*Tag `json:"tags,omitempty"` + StoppingCondition *StoppingCondition `json:"stoppingCondition"` + TensorBoardOutputConfig *TensorBoardOutputConfig `json:"tensorBoardOutputConfig,omitempty"` + // The name of the training job. The name must be unique within an AWS Region + // in an AWS account. // +kubebuilder:validation:Required - TrainingJobName *string `json:"trainingJobName"` - VPCConfig *VPCConfig `json:"vpcConfig,omitempty"` + TrainingJobName *string `json:"trainingJobName"` + // A VpcConfig object that specifies the VPC that you want your training job + // to connect to. Control access to and from your training container by configuring + // the VPC. For more information, see Protect Training Jobs by Using an Amazon + // Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). 
+ VPCConfig *VPCConfig `json:"vpcConfig,omitempty"` } // TrainingJobStatus defines the observed state of TrainingJob @@ -60,10 +154,83 @@ type TrainingJobStatus struct { // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource - Conditions []*ackv1alpha1.Condition `json:"conditions"` - FailureReason *string `json:"failureReason,omitempty"` - SecondaryStatus *string `json:"secondaryStatus,omitempty"` - TrainingJobStatus *string `json:"trainingJobStatus,omitempty"` + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // If the training job failed, the reason it failed. + FailureReason *string `json:"failureReason,omitempty"` + // Provides detailed information about the state of the training job. For detailed + // information on the secondary status of the training job, see StatusMessage + // under SecondaryStatusTransition. + // + // Amazon SageMaker provides primary statuses and secondary statuses that apply + // to each of them: + // + // InProgress + // + // * Starting - Starting the training job. + // + // * Downloading - An optional stage for algorithms that support File training + // input mode. It indicates that data is being downloaded to the ML storage + // volumes. + // + // * Training - Training is in progress. + // + // * Interrupted - The job stopped because the managed spot training instances + // were interrupted. + // + // * Uploading - Training is complete and the model artifacts are being uploaded + // to the S3 location. + // + // Completed + // + // * Completed - The training job has completed. + // + // Failed + // + // * Failed - The training job has failed. The reason for the failure is + // returned in the FailureReason field of DescribeTrainingJobResponse. + // + // Stopped + // + // * MaxRuntimeExceeded - The job stopped because it exceeded the maximum + // allowed runtime. + // + // * MaxWaitTimeExceeded - The job stopped because it exceeded the maximum + // allowed wait time. + // + // * Stopped - The training job has stopped. + // + // Stopping + // + // * Stopping - Stopping the training job. + // + // Valid values for SecondaryStatus are subject to change. + // + // We no longer support the following secondary statuses: + // + // * LaunchingMLInstances + // + // * PreparingTrainingStack + // + // * DownloadingTrainingImage + SecondaryStatus *string `json:"secondaryStatus,omitempty"` + // The status of the training job. + // + // Amazon SageMaker provides the following training job statuses: + // + // * InProgress - The training is in progress. + // + // * Completed - The training job has completed. + // + // * Failed - The training job has failed. To see the reason for the failure, + // see the FailureReason field in the response to a DescribeTrainingJobResponse + // call. + // + // * Stopping - The training job is stopping. + // + // * Stopped - The training job has stopped. + // + // For more detailed information, see SecondaryStatus. 
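Pulling together the required TrainingJobSpec fields above (algorithmSpecification, outputDataConfig, resourceConfig, roleARN, stoppingCondition, trainingJobName), a hedged minimal manifest might look like this. The nested field names (trainingImage, trainingInputMode, s3OutputPath, instanceCount, instanceType, volumeSizeInGB, maxRuntimeInSeconds) follow the familiar SageMaker request shape and are assumptions here; the image, bucket, and role values are placeholders.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: TrainingJob
metadata:
  name: example-training-job                           # hypothetical name
spec:
  trainingJobName: example-training-job                # required; unique within an AWS Region in the account
  roleARN: arn:aws:iam::111122223333:role/example-sagemaker-role   # required; caller needs iam:PassRole
  algorithmSpecification:                              # required; sub-field names assumed
    trainingImage: 111122223333.dkr.ecr.us-west-2.amazonaws.com/example-training:latest
    trainingInputMode: File
  outputDataConfig:                                    # required
    s3OutputPath: s3://example-bucket/output/
  resourceConfig:                                      # required
    instanceCount: 1
    instanceType: ml.m5.xlarge
    volumeSizeInGB: 50
  stoppingCondition:                                   # required; caps runtime and therefore cost
    maxRuntimeInSeconds: 3600
  # inputDataConfig, hyperParameters, checkpointConfig, vpcConfig, and the Debugger/Profiler fields are optional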
+ TrainingJobStatus *string `json:"trainingJobStatus,omitempty"` } // TrainingJob is the Schema for the TrainingJobs API diff --git a/apis/v1alpha1/transform_job.go b/apis/v1alpha1/transform_job.go index 94f61c81..c9243c62 100644 --- a/apis/v1alpha1/transform_job.go +++ b/apis/v1alpha1/transform_job.go @@ -22,22 +22,72 @@ import ( // TransformJobSpec defines the desired state of TransformJob type TransformJobSpec struct { - BatchStrategy *string `json:"batchStrategy,omitempty"` - DataProcessing *DataProcessing `json:"dataProcessing,omitempty"` - Environment map[string]*string `json:"environment,omitempty"` - ExperimentConfig *ExperimentConfig `json:"experimentConfig,omitempty"` - MaxConcurrentTransforms *int64 `json:"maxConcurrentTransforms,omitempty"` - MaxPayloadInMB *int64 `json:"maxPayloadInMB,omitempty"` - ModelClientConfig *ModelClientConfig `json:"modelClientConfig,omitempty"` + // Specifies the number of records to include in a mini-batch for an HTTP inference + // request. A record is a single unit of input data that inference can be made + // on. For example, a single line in a CSV file is a record. + // + // To enable the batch strategy, you must set the SplitType property to Line, + // RecordIO, or TFRecord. + // + // To use only one record when making an HTTP invocation request to a container, + // set BatchStrategy to SingleRecord and SplitType to Line. + // + // To fit as many records in a mini-batch as can fit within the MaxPayloadInMB + // limit, set BatchStrategy to MultiRecord and SplitType to Line. + BatchStrategy *string `json:"batchStrategy,omitempty"` + // The data structure used to specify the data to be used for inference in a + // batch transform job and to associate the data that is relevant to the prediction + // results in the output. The input filter provided allows you to exclude input + // data that is not needed for inference in a batch transform job. The output + // filter provided allows you to include input data relevant to interpreting + // the predictions in the output from the job. For more information, see Associate + // Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). + DataProcessing *DataProcessing `json:"dataProcessing,omitempty"` + // The environment variables to set in the Docker container. We support up to + // 16 key and values entries in the map. + Environment map[string]*string `json:"environment,omitempty"` + + ExperimentConfig *ExperimentConfig `json:"experimentConfig,omitempty"` + // The maximum number of parallel requests that can be sent to each instance + // in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, + // Amazon SageMaker checks the optional execution-parameters to determine the + // settings for your chosen algorithm. If the execution-parameters endpoint + // is not enabled, the default value is 1. For more information on execution-parameters, + // see How Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). + // For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms. + MaxConcurrentTransforms *int64 `json:"maxConcurrentTransforms,omitempty"` + // The maximum allowed size of the payload, in MB. A payload is the data portion + // of a record (without metadata). The value in MaxPayloadInMB must be greater + // than, or equal to, the size of a single record. 
To estimate the size of a + // record in MB, divide the size of your dataset by the number of records. To + // ensure that the records fit within the maximum payload size, we recommend + // using a slightly larger value. The default value is 6 MB. + // + // For cases where the payload might be arbitrarily large and is transmitted + // using HTTP chunked encoding, set the value to 0. This feature works only + // in supported algorithms. Currently, Amazon SageMaker built-in algorithms + // do not support HTTP chunked encoding. + MaxPayloadInMB *int64 `json:"maxPayloadInMB,omitempty"` + // Configures the timeout and maximum number of retries for processing a transform + // job invocation. + ModelClientConfig *ModelClientConfig `json:"modelClientConfig,omitempty"` + // The name of the model that you want to use for the transform job. ModelName + // must be the name of an existing Amazon SageMaker model within an AWS Region + // in an AWS account. // +kubebuilder:validation:Required ModelName *string `json:"modelName"` - Tags []*Tag `json:"tags,omitempty"` + // Describes the input source and the way the transform job consumes it. // +kubebuilder:validation:Required TransformInput *TransformInput `json:"transformInput"` + // The name of the transform job. The name must be unique within an AWS Region + // in an AWS account. // +kubebuilder:validation:Required TransformJobName *string `json:"transformJobName"` + // Describes the results of the transform job. // +kubebuilder:validation:Required TransformOutput *TransformOutput `json:"transformOutput"` + // Describes the resources, including ML instance types and ML instance count, + // to use for the transform job. // +kubebuilder:validation:Required TransformResources *TransformResources `json:"transformResources"` } @@ -52,9 +102,15 @@ type TransformJobStatus struct { // contains a collection of `ackv1alpha1.Condition` objects that describe // the various terminal states of the CR and its backend AWS service API // resource - Conditions []*ackv1alpha1.Condition `json:"conditions"` - FailureReason *string `json:"failureReason,omitempty"` - TransformJobStatus *string `json:"transformJobStatus,omitempty"` + Conditions []*ackv1alpha1.Condition `json:"conditions"` + // If the transform job failed, FailureReason describes why it failed. A transform + // job creates a log file, which includes error messages, and stores it as an + // Amazon S3 object. For more information, see Log Amazon SageMaker Events with + // Amazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). + FailureReason *string `json:"failureReason,omitempty"` + // The status of the transform job. If the transform job failed, the reason + // is returned in the FailureReason field. 
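Finally, for the TransformJob resource above, modelName, transformInput, transformJobName, transformOutput, and transformResources are required. A rough manifest is sketched below; the nested names (dataSource, s3DataSource, s3DataType, s3URI, s3OutputPath, instanceCount, instanceType) mirror the usual SageMaker batch-transform shape and are assumptions, with placeholder bucket and model names.

apiVersion: sagemaker.services.k8s.aws/v1alpha1
kind: TransformJob
metadata:
  name: example-transform-job                          # hypothetical name
spec:
  transformJobName: example-transform-job              # required; unique within an AWS Region in the account
  modelName: example-model                             # required; must name an existing SageMaker model
  transformInput:                                      # required; sub-field names assumed
    contentType: text/csv
    dataSource:
      s3DataSource:
        s3DataType: S3Prefix
        s3URI: s3://example-bucket/input/
  transformOutput:                                     # required
    s3OutputPath: s3://example-bucket/transform-output/
  transformResources:                                  # required
    instanceCount: 1
    instanceType: ml.m5.xlarge
  # batchStrategy, maxPayloadInMB, maxConcurrentTransforms, and dataProcessing are optional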
+ TransformJobStatus *string `json:"transformJobStatus,omitempty"` } // TransformJob is the Schema for the TransformJobs API diff --git a/apis/v1alpha1/types.go b/apis/v1alpha1/types.go index e305a891..77149d31 100644 --- a/apis/v1alpha1/types.go +++ b/apis/v1alpha1/types.go @@ -358,7 +358,6 @@ type Endpoint_SDK struct { FailureReason *string `json:"failureReason,omitempty"` LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` ProductionVariants []*ProductionVariantSummary `json:"productionVariants,omitempty"` - Tags []*Tag `json:"tags,omitempty"` } type Experiment struct { @@ -366,7 +365,6 @@ type Experiment struct { DisplayName *string `json:"displayName,omitempty"` ExperimentName *string `json:"experimentName,omitempty"` LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` - Tags []*Tag `json:"tags,omitempty"` } type ExperimentConfig struct { @@ -386,7 +384,6 @@ type FeatureGroup struct { FailureReason *string `json:"failureReason,omitempty"` FeatureGroupName *string `json:"featureGroupName,omitempty"` RoleARN *string `json:"roleARN,omitempty"` - Tags []*Tag `json:"tags,omitempty"` } type FeatureGroupSummary struct { @@ -617,7 +614,6 @@ type ModelExplainabilityBaselineConfig struct { type ModelPackage struct { LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` - Tags []*Tag `json:"tags,omitempty"` } type ModelPackageContainerDefinition struct { @@ -626,10 +622,6 @@ type ModelPackageContainerDefinition struct { ModelDataURL *string `json:"modelDataURL,omitempty"` } -type ModelPackageGroup struct { - Tags []*Tag `json:"tags,omitempty"` -} - type ModelPackageStatusItem struct { FailureReason *string `json:"failureReason,omitempty"` } @@ -717,7 +709,6 @@ type MonitoringSchedule struct { FailureReason *string `json:"failureReason,omitempty"` LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` MonitoringScheduleARN *string `json:"monitoringScheduleARN,omitempty"` - Tags []*Tag `json:"tags,omitempty"` } type MonitoringScheduleSummary struct { @@ -789,7 +780,6 @@ type Pipeline struct { LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` LastRunTime *metav1.Time `json:"lastRunTime,omitempty"` RoleARN *string `json:"roleARN,omitempty"` - Tags []*Tag `json:"tags,omitempty"` } type PipelineExecution struct { @@ -868,7 +858,6 @@ type ProcessingJob_SDK struct { ProcessingStartTime *metav1.Time `json:"processingStartTime,omitempty"` RoleARN *string `json:"roleARN,omitempty"` StoppingCondition *ProcessingStoppingCondition `json:"stoppingCondition,omitempty"` - Tags []*Tag `json:"tags,omitempty"` TrainingJobARN *string `json:"trainingJobARN,omitempty"` } @@ -1038,11 +1027,6 @@ type SubscribedWorkteam struct { SellerName *string `json:"sellerName,omitempty"` } -type Tag struct { - Key *string `json:"key,omitempty"` - Value *string `json:"value,omitempty"` -} - type TensorBoardOutputConfig struct { LocalPath *string `json:"localPath,omitempty"` S3OutputPath *string `json:"s3OutputPath,omitempty"` @@ -1110,7 +1094,6 @@ type TrainingJob_SDK struct { SecondaryStatus *string `json:"secondaryStatus,omitempty"` SecondaryStatusTransitions []*SecondaryStatusTransition `json:"secondaryStatusTransitions,omitempty"` StoppingCondition *StoppingCondition `json:"stoppingCondition,omitempty"` - Tags []*Tag `json:"tags,omitempty"` TensorBoardOutputConfig *TensorBoardOutputConfig `json:"tensorBoardOutputConfig,omitempty"` TrainingEndTime *metav1.Time `json:"trainingEndTime,omitempty"` TrainingJobARN *string `json:"trainingJobARN,omitempty"` @@ -1176,7 
+1159,6 @@ type TransformJob_SDK struct { MaxPayloadInMB *int64 `json:"maxPayloadInMB,omitempty"` ModelClientConfig *ModelClientConfig `json:"modelClientConfig,omitempty"` ModelName *string `json:"modelName,omitempty"` - Tags []*Tag `json:"tags,omitempty"` TransformEndTime *metav1.Time `json:"transformEndTime,omitempty"` TransformInput *TransformInput `json:"transformInput,omitempty"` TransformJobARN *string `json:"transformJobARN,omitempty"` @@ -1210,7 +1192,6 @@ type Trial struct { DisplayName *string `json:"displayName,omitempty"` ExperimentName *string `json:"experimentName,omitempty"` LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` - Tags []*Tag `json:"tags,omitempty"` TrialName *string `json:"trialName,omitempty"` } @@ -1220,7 +1201,6 @@ type TrialComponent struct { EndTime *metav1.Time `json:"endTime,omitempty"` LastModifiedTime *metav1.Time `json:"lastModifiedTime,omitempty"` StartTime *metav1.Time `json:"startTime,omitempty"` - Tags []*Tag `json:"tags,omitempty"` TrialComponentName *string `json:"trialComponentName,omitempty"` } diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index ade280b2..be84bf5b 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -1753,17 +1753,6 @@ func (in *EndpointConfigSpec) DeepCopyInto(out *EndpointConfigSpec) { } } } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConfigSpec. @@ -1931,17 +1920,6 @@ func (in *EndpointSpec) DeepCopyInto(out *EndpointSpec) { *out = new(string) **out = **in } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSpec. 
@@ -1973,10 +1951,9 @@ func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { } } } - if in.EndpointConfigName != nil { - in, out := &in.EndpointConfigName, &out.EndpointConfigName - *out = new(string) - **out = **in + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() } if in.EndpointStatus != nil { in, out := &in.EndpointStatus, &out.EndpointStatus @@ -1988,6 +1965,31 @@ func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { *out = new(string) **out = **in } + if in.LastEndpointConfigNameForUpdate != nil { + in, out := &in.LastEndpointConfigNameForUpdate, &out.LastEndpointConfigNameForUpdate + *out = new(string) + **out = **in + } + if in.LastModifiedTime != nil { + in, out := &in.LastModifiedTime, &out.LastModifiedTime + *out = (*in).DeepCopy() + } + if in.LatestEndpointConfigName != nil { + in, out := &in.LatestEndpointConfigName, &out.LatestEndpointConfigName + *out = new(string) + **out = **in + } + if in.ProductionVariants != nil { + in, out := &in.ProductionVariants, &out.ProductionVariants + *out = make([]*ProductionVariantSummary, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ProductionVariantSummary) + (*in).DeepCopyInto(*out) + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus. @@ -2090,17 +2092,6 @@ func (in *Endpoint_SDK) DeepCopyInto(out *Endpoint_SDK) { } } } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint_SDK. @@ -2134,17 +2125,6 @@ func (in *Experiment) DeepCopyInto(out *Experiment) { in, out := &in.LastModifiedTime, &out.LastModifiedTime *out = (*in).DeepCopy() } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Experiment. @@ -2238,17 +2218,6 @@ func (in *FeatureGroup) DeepCopyInto(out *FeatureGroup) { *out = new(string) **out = **in } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGroup. 
@@ -2839,17 +2808,6 @@ func (in *HyperParameterTuningJobSpec) DeepCopyInto(out *HyperParameterTuningJob *out = new(string) **out = **in } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TrainingJobDefinition != nil { in, out := &in.TrainingJobDefinition, &out.TrainingJobDefinition *out = new(HyperParameterTrainingJobDefinition) @@ -3645,17 +3603,6 @@ func (in *ModelPackage) DeepCopyInto(out *ModelPackage) { in, out := &in.LastModifiedTime, &out.LastModifiedTime *out = (*in).DeepCopy() } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelPackage. @@ -3698,32 +3645,6 @@ func (in *ModelPackageContainerDefinition) DeepCopy() *ModelPackageContainerDefi return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ModelPackageGroup) DeepCopyInto(out *ModelPackageGroup) { - *out = *in - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelPackageGroup. -func (in *ModelPackageGroup) DeepCopy() *ModelPackageGroup { - if in == nil { - return nil - } - out := new(ModelPackageGroup) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ModelPackageStatusItem) DeepCopyInto(out *ModelPackageStatusItem) { *out = *in @@ -3864,17 +3785,6 @@ func (in *ModelSpec) DeepCopyInto(out *ModelSpec) { *out = new(ContainerDefinition) (*in).DeepCopyInto(*out) } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.VPCConfig != nil { in, out := &in.VPCConfig, &out.VPCConfig *out = new(VPCConfig) @@ -4260,17 +4170,6 @@ func (in *MonitoringSchedule) DeepCopyInto(out *MonitoringSchedule) { *out = new(string) **out = **in } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSchedule. @@ -4639,17 +4538,6 @@ func (in *Pipeline) DeepCopyInto(out *Pipeline) { *out = new(string) **out = **in } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline. 
@@ -4982,17 +4870,6 @@ func (in *ProcessingJobSpec) DeepCopyInto(out *ProcessingJobSpec) { *out = new(ProcessingStoppingCondition) (*in).DeepCopyInto(*out) } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProcessingJobSpec. @@ -5233,17 +5110,6 @@ func (in *ProcessingJob_SDK) DeepCopyInto(out *ProcessingJob_SDK) { *out = new(ProcessingStoppingCondition) (*in).DeepCopyInto(*out) } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TrainingJobARN != nil { in, out := &in.TrainingJobARN, &out.TrainingJobARN *out = new(string) @@ -6135,31 +6001,6 @@ func (in *SubscribedWorkteam) DeepCopy() *SubscribedWorkteam { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Tag) DeepCopyInto(out *Tag) { - *out = *in - if in.Key != nil { - in, out := &in.Key, &out.Key - *out = new(string) - **out = **in - } - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tag. -func (in *Tag) DeepCopy() *Tag { - if in == nil { - return nil - } - out := new(Tag) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TensorBoardOutputConfig) DeepCopyInto(out *TensorBoardOutputConfig) { *out = *in @@ -6446,17 +6287,6 @@ func (in *TrainingJobSpec) DeepCopyInto(out *TrainingJobSpec) { *out = new(StoppingCondition) (*in).DeepCopyInto(*out) } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TensorBoardOutputConfig != nil { in, out := &in.TensorBoardOutputConfig, &out.TensorBoardOutputConfig *out = new(TensorBoardOutputConfig) @@ -6798,17 +6628,6 @@ func (in *TrainingJob_SDK) DeepCopyInto(out *TrainingJob_SDK) { *out = new(StoppingCondition) (*in).DeepCopyInto(*out) } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TensorBoardOutputConfig != nil { in, out := &in.TensorBoardOutputConfig, &out.TensorBoardOutputConfig *out = new(TensorBoardOutputConfig) @@ -7127,17 +6946,6 @@ func (in *TransformJobSpec) DeepCopyInto(out *TransformJobSpec) { *out = new(string) **out = **in } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TransformInput != nil { in, out := &in.TransformInput, &out.TransformInput *out = new(TransformInput) @@ -7350,17 +7158,6 @@ func (in *TransformJob_SDK) DeepCopyInto(out *TransformJob_SDK) { *out = new(string) **out = **in } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, 
len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TransformEndTime != nil { in, out := &in.TransformEndTime, &out.TransformEndTime *out = (*in).DeepCopy() @@ -7522,17 +7319,6 @@ func (in *Trial) DeepCopyInto(out *Trial) { in, out := &in.LastModifiedTime, &out.LastModifiedTime *out = (*in).DeepCopy() } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TrialName != nil { in, out := &in.TrialName, &out.TrialName *out = new(string) @@ -7574,17 +7360,6 @@ func (in *TrialComponent) DeepCopyInto(out *TrialComponent) { in, out := &in.StartTime, &out.StartTime *out = (*in).DeepCopy() } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]*Tag, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Tag) - (*in).DeepCopyInto(*out) - } - } - } if in.TrialComponentName != nil { in, out := &in.TrialComponentName, &out.TrialComponentName *out = new(string) diff --git a/config/crd/bases/sagemaker.services.k8s.aws_endpointconfigs.yaml b/config/crd/bases/sagemaker.services.k8s.aws_endpointconfigs.yaml index fa59387b..febb2b14 100644 --- a/config/crd/bases/sagemaker.services.k8s.aws_endpointconfigs.yaml +++ b/config/crd/bases/sagemaker.services.k8s.aws_endpointconfigs.yaml @@ -67,10 +67,36 @@ spec: type: string type: object endpointConfigName: + description: The name of the endpoint configuration. You specify this + name in a CreateEndpoint request. type: string kmsKeyID: + description: "The Amazon Resource Name (ARN) of a AWS Key Management + Service key that Amazon SageMaker uses to encrypt data on the storage + volume attached to the ML compute instance that hosts the endpoint. + \n The KmsKeyId can be any of the following formats: \n * Key + ID: 1234abcd-12ab-34cd-56ef-1234567890ab \n * Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + \n * Alias name: alias/ExampleAlias \n * Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias + \n The KMS key policy must grant permission to the IAM role that + you specify in your CreateEndpoint, UpdateEndpoint requests. For + more information, refer to the AWS Key Management Service section + Using Key Policies in AWS KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + \n Certain Nitro-based instances include local storage, dependent + on the instance type. Local storage volumes are encrypted using + a hardware module on the instance. You can't request a KmsKeyId + when using an instance type with local storage. If any of the models + that you specify in the ProductionVariants parameter use nitro-based + instances with local storage, do not specify a value for the KmsKeyId + parameter. If you specify a value for KmsKeyId when using any nitro-based + instances with local storage, the call to CreateEndpointConfig fails. + \n For a list of instance types that support local instance storage, + see Instance Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). + \n For more information about local instance storage encryption, + see SSD Instance Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html)." 
type: string productionVariants: + description: An list of ProductionVariant objects, one for each model + that you want to host at this endpoint. items: properties: acceleratorType: @@ -95,15 +121,6 @@ spec: type: string type: object type: array - tags: - items: - properties: - key: - type: string - value: - type: string - type: object - type: array required: - endpointConfigName - productionVariants diff --git a/config/crd/bases/sagemaker.services.k8s.aws_endpoints.yaml b/config/crd/bases/sagemaker.services.k8s.aws_endpoints.yaml index ca6a3440..3c848867 100644 --- a/config/crd/bases/sagemaker.services.k8s.aws_endpoints.yaml +++ b/config/crd/bases/sagemaker.services.k8s.aws_endpoints.yaml @@ -44,18 +44,15 @@ spec: description: EndpointSpec defines the desired state of Endpoint properties: endpointConfigName: + description: The name of an endpoint configuration. For more information, + see CreateEndpointConfig. type: string endpointName: + description: The name of the endpoint.The name must be unique within + an AWS Region in your AWS account. The name is case-insensitive + in CreateEndpoint, but the case is preserved and must be matched + in . type: string - tags: - items: - properties: - key: - type: string - value: - type: string - type: object - type: array required: - endpointConfigName - endpointName @@ -119,12 +116,78 @@ spec: - type type: object type: array - endpointConfigName: + creationTime: + description: A timestamp that shows when the endpoint was created. + format: date-time type: string endpointStatus: + description: "The status of the endpoint. \n * OutOfService: Endpoint + is not available to take incoming requests. \n * Creating: CreateEndpoint + is executing. \n * Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities + is executing. \n * SystemUpdating: Endpoint is undergoing maintenance + and cannot be updated or deleted or re-scaled until it has completed. + This maintenance operation does not change any customer-specified + values such as VPC config, KMS encryption, model, instance type, + or instance count. \n * RollingBack: Endpoint fails to scale + up or down or change its variant weight and is in the process + of rolling back to its previous configuration. Once the rollback + completes, endpoint returns to an InService status. This transitional + status only applies to an endpoint that has autoscaling enabled + and is undergoing variant weight or capacity changes as part of + \ an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities + \ operation is called explicitly. \n * InService: Endpoint + is available to process incoming requests. \n * Deleting: DeleteEndpoint + is executing. \n * Failed: Endpoint could not be created, updated, + or re-scaled. Use DescribeEndpointOutput$FailureReason for information + about the failure. DeleteEndpoint is the only operation that + can be performed on a failed endpoint." type: string failureReason: + description: If the status of the endpoint is Failed, the reason why + it failed. + type: string + lastEndpointConfigNameForUpdate: + description: Name of the Amazon SageMaker endpoint configuration. type: string + lastModifiedTime: + description: A timestamp that shows when the endpoint was last modified. + format: date-time + type: string + latestEndpointConfigName: + description: The name of the endpoint configuration associated with + this endpoint. 
+ type: string + productionVariants: + description: An array of ProductionVariantSummary objects, one for + each model hosted behind this endpoint. + items: + properties: + currentInstanceCount: + format: int64 + type: integer + currentWeight: + type: number + deployedImages: + items: + properties: + resolutionTime: + format: date-time + type: string + resolvedImage: + type: string + specifiedImage: + type: string + type: object + type: array + desiredInstanceCount: + format: int64 + type: integer + desiredWeight: + type: number + variantName: + type: string + type: object + type: array required: - ackResourceMetadata - conditions diff --git a/config/crd/bases/sagemaker.services.k8s.aws_hyperparametertuningjobs.yaml b/config/crd/bases/sagemaker.services.k8s.aws_hyperparametertuningjobs.yaml index 3c0ea25e..f0736f81 100644 --- a/config/crd/bases/sagemaker.services.k8s.aws_hyperparametertuningjobs.yaml +++ b/config/crd/bases/sagemaker.services.k8s.aws_hyperparametertuningjobs.yaml @@ -46,6 +46,11 @@ spec: HyperParameterTuningJob properties: hyperParameterTuningJobConfig: + description: The HyperParameterTuningJobConfig object that describes + the tuning job, including the search strategy, the objective metric + used to evaluate training jobs, ranges of parameters to search, + and resource limits for the tuning job. For more information, see + How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). properties: hyperParameterTuningJobObjective: properties: @@ -114,17 +119,17 @@ spec: type: object type: object hyperParameterTuningJobName: + description: 'The name of the tuning job. This name is the prefix + for the names of all training jobs that this tuning job launches. + The name must be unique within the same AWS account and AWS Region. + The name must have 1 to 32 characters. Valid characters are a-z, + A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.' type: string - tags: - items: - properties: - key: - type: string - value: - type: string - type: object - type: array trainingJobDefinition: + description: The HyperParameterTrainingJobDefinition object that describes + the training jobs that this tuning job launches, including static + hyperparameters, input data configuration, output data configuration, + resource configuration, and stopping condition. properties: algorithmSpecification: properties: @@ -302,6 +307,8 @@ spec: type: object type: object trainingJobDefinitions: + description: A list of the HyperParameterTrainingJobDefinition objects + launched for this tuning job. items: properties: algorithmSpecification: @@ -481,6 +488,20 @@ spec: type: object type: array warmStartConfig: + description: "Specifies the configuration for starting the hyperparameter + tuning job using one or more previous tuning jobs as a starting + point. The results of previous tuning jobs are used to inform which + combinations of hyperparameters to search over in the new tuning + job. \n All training jobs launched by the new hyperparameter tuning + job are evaluated by using the objective metric. If you specify + IDENTICAL_DATA_AND_ALGORITHM as the WarmStartType value for the + warm start configuration, the training job that performs the best + in the new tuning job is compared to the best training jobs from + the parent tuning jobs. From these, the training job that performs + the best as measured by the objective metric is returned as the + overall best training job. 
\n All training jobs launched by parent + hyperparameter tuning jobs and the new hyperparameter tuning jobs + count against the limit of training jobs for the tuning job." properties: parentHyperParameterTuningJobs: items: @@ -524,6 +545,8 @@ spec: - ownerAccountID type: object bestTrainingJob: + description: A TrainingJobSummary object that describes the training + job that completed with the best current HyperParameterTuningJobObjective. properties: creationTime: format: date-time @@ -596,10 +619,18 @@ spec: type: object type: array failureReason: + description: If the tuning job failed, the reason it failed. type: string hyperParameterTuningJobStatus: + description: 'The status of the tuning job: InProgress, Completed, + Failed, Stopping, or Stopped.' type: string overallBestTrainingJob: + description: If the hyperparameter tuning job is an warm start tuning + job with a WarmStartType of IDENTICAL_DATA_AND_ALGORITHM, this is + the TrainingJobSummary for the training job with the best objective + metric value of all training jobs launched by this tuning job and + all parent jobs specified for the warm start tuning job. properties: creationTime: format: date-time diff --git a/config/crd/bases/sagemaker.services.k8s.aws_models.yaml b/config/crd/bases/sagemaker.services.k8s.aws_models.yaml index dcd8f72a..31569ab5 100644 --- a/config/crd/bases/sagemaker.services.k8s.aws_models.yaml +++ b/config/crd/bases/sagemaker.services.k8s.aws_models.yaml @@ -37,6 +37,7 @@ spec: description: ModelSpec defines the desired state of Model properties: containers: + description: Specifies the containers in the inference pipeline. items: properties: containerHostname: @@ -66,17 +67,32 @@ spec: type: object type: array enableNetworkIsolation: + description: Isolates the model container. No inbound or outbound + network calls can be made to or from the model container. type: boolean executionRoleARN: + description: "The Amazon Resource Name (ARN) of the IAM role that + Amazon SageMaker can assume to access model artifacts and docker + image for deployment on ML compute instances or for batch transform + jobs. Deploying on ML compute instances is part of model hosting. + For more information, see Amazon SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + \n To be able to pass this role to Amazon SageMaker, the caller + of this API must have the iam:PassRole permission." type: string inferenceExecutionConfig: + description: Specifies details of how containers in a multi-container + endpoint are called. properties: mode: type: string type: object modelName: + description: The name of the new model. type: string primaryContainer: + description: The location of the primary docker image containing inference + code, associated artifacts, and custom environment map that the + inference code uses when the model is deployed for predictions. properties: containerHostname: type: string @@ -103,16 +119,14 @@ spec: type: string type: object type: object - tags: - items: - properties: - key: - type: string - value: - type: string - type: object - type: array vpcConfig: + description: A VpcConfig object that specifies the VPC that you want + your model to connect to. Control access to and from your model + container by configuring the VPC. VpcConfig is used in hosting services + and in batch transform. 
For more information, see Protect Endpoints + by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) + and Protect Data in Batch Transform Jobs by Using an Amazon Virtual + Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). properties: securityGroupIDs: items: diff --git a/config/crd/bases/sagemaker.services.k8s.aws_processingjobs.yaml b/config/crd/bases/sagemaker.services.k8s.aws_processingjobs.yaml index 3abeb194..05b2ca80 100644 --- a/config/crd/bases/sagemaker.services.k8s.aws_processingjobs.yaml +++ b/config/crd/bases/sagemaker.services.k8s.aws_processingjobs.yaml @@ -44,6 +44,8 @@ spec: description: ProcessingJobSpec defines the desired state of ProcessingJob properties: appSpecification: + description: Configures the processing job to run a specified Docker + container image. properties: containerArguments: items: @@ -59,6 +61,8 @@ spec: environment: additionalProperties: type: string + description: The environment variables to set in the Docker container. + Up to 100 key and values entries in the map are supported. type: object experimentConfig: properties: @@ -70,6 +74,10 @@ spec: type: string type: object networkConfig: + description: Networking options for a processing job, such as whether + to allow inbound and outbound network calls to and from processing + containers, and the VPC subnets and security groups to use for VPC-enabled + processing jobs. properties: enableInterContainerTrafficEncryption: type: boolean @@ -88,6 +96,8 @@ spec: type: object type: object processingInputs: + description: An array of inputs configuring the data to download into + the processing container. items: properties: appManaged: @@ -161,8 +171,11 @@ spec: type: object type: array processingJobName: + description: The name of the processing job. The name must be unique + within an AWS Region in the AWS account. type: string processingOutputConfig: + description: Output configuration for the processing job. properties: kmsKeyID: type: string @@ -191,6 +204,9 @@ spec: type: array type: object processingResources: + description: Identifies the resources, ML compute instances, and ML + storage volumes to deploy for a processing job. In distributed training, + you specify more than one instance. properties: clusterConfig: properties: @@ -207,22 +223,17 @@ spec: type: object type: object roleARN: + description: The Amazon Resource Name (ARN) of an IAM role that Amazon + SageMaker can assume to perform tasks on your behalf. type: string stoppingCondition: + description: The time limit for how long the processing job is allowed + to run. properties: maxRuntimeInSeconds: format: int64 type: integer type: object - tags: - items: - properties: - key: - type: string - value: - type: string - type: object - type: array required: - appSpecification - processingJobName @@ -289,8 +300,11 @@ spec: type: object type: array failureReason: + description: A string, up to one KB in size, that contains the reason + a processing job failed, if it failed. type: string processingJobStatus: + description: Provides the status of a processing job. 
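The environment description for processing jobs above caps the map at 100 key/value entries. A tiny pre-flight check of that limit might look like the following; the function name and message are invented for this sketch, and the service performs the real validation.

package main

import "fmt"

// validateProcessingEnvironment enforces the documented limit of at most 100
// environment variable entries for a processing job before the spec is
// submitted. The limit comes from the field description above.
func validateProcessingEnvironment(env map[string]string) error {
	const maxEntries = 100
	if len(env) > maxEntries {
		return fmt.Errorf("environment has %d entries; at most %d are supported", len(env), maxEntries)
	}
	return nil
}

func main() {
	env := map[string]string{"MODE": "batch", "LOG_LEVEL": "info"}
	if err := validateProcessingEnvironment(env); err != nil {
		fmt.Println("invalid spec:", err)
		return
	}
	fmt.Println("environment within the documented limit")
}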
type: string required: - ackResourceMetadata diff --git a/config/crd/bases/sagemaker.services.k8s.aws_trainingjobs.yaml b/config/crd/bases/sagemaker.services.k8s.aws_trainingjobs.yaml index f1ea484d..c3b79c69 100644 --- a/config/crd/bases/sagemaker.services.k8s.aws_trainingjobs.yaml +++ b/config/crd/bases/sagemaker.services.k8s.aws_trainingjobs.yaml @@ -44,6 +44,12 @@ spec: description: TrainingJobSpec defines the desired state of TrainingJob properties: algorithmSpecification: + description: The registry path of the Docker image that contains the + training algorithm and algorithm-specific metadata, including the + input mode. For more information about algorithms provided by Amazon + SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + For information about providing your own algorithms, see Using Your + Own Algorithms with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html). properties: algorithmName: type: string @@ -64,6 +70,8 @@ spec: type: string type: object checkpointConfig: + description: Contains information about the output location for managed + spot training checkpoint data. properties: localPath: type: string @@ -93,6 +101,8 @@ spec: type: string type: object debugRuleConfigurations: + description: Configuration information for Debugger rules for debugging + output tensors. items: properties: instanceType: @@ -115,10 +125,34 @@ spec: type: object type: array enableInterContainerTrafficEncryption: + description: To encrypt all communications between ML compute instances + in distributed training, choose True. Encryption provides greater + security for distributed training, but training might take longer. + How long it takes depends on the amount of communication between + compute instances, especially if you use a deep learning algorithm + in distributed training. For more information, see Protect Communications + Between ML Compute Instances in a Distributed Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html). type: boolean enableManagedSpotTraining: + description: "To train models using managed spot training, choose + True. Managed spot training provides a fully managed and scalable + infrastructure for training machine learning models. this option + is useful when training jobs can be interrupted and when there is + flexibility when the training job is run. \n The complete and intermediate + results of jobs are stored in an Amazon S3 bucket, and can be used + as a starting point to train models incrementally. Amazon SageMaker + provides metrics and logs in CloudWatch. They can be used to see + when managed spot training jobs are running, interrupted, resumed, + or completed." type: boolean enableNetworkIsolation: + description: Isolates the training container. No inbound or outbound + network calls can be made, except for calls between peers within + a training cluster for distributed training. If you enable network + isolation for training jobs that are configured to use a VPC, Amazon + SageMaker downloads and uploads customer data and model artifacts + through the specified VPC, but the training container does not have + network access. type: boolean experimentConfig: properties: @@ -132,8 +166,29 @@ spec: hyperParameters: additionalProperties: type: string + description: "Algorithm-specific parameters that influence the quality + of the model. You set hyperparameters before you start the learning + process. 
For a list of hyperparameters for each training algorithm + provided by Amazon SageMaker, see Algorithms (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). + \n You can specify a maximum of 100 hyperparameters. Each hyperparameter + is a key-value pair. Each key and value is limited to 256 characters, + as specified by the Length Constraint." type: object inputDataConfig: + description: "An array of Channel objects. Each channel is a named + input source. InputDataConfig describes the input data and its location. + \n Algorithms can accept input data from one or more channels. For + example, an algorithm might have two channels of input data, training_data + and validation_data. The configuration for each channel provides + the S3, EFS, or FSx location where the input data is stored. It + also provides information about the stored data: the MIME type, + compression method, and whether the data is wrapped in RecordIO + format. \n Depending on the input mode that the algorithm supports, + Amazon SageMaker either copies input data files from an S3 bucket + to a local directory in the Docker container, or makes it available + as input streams. For example, if you specify an EFS location, input + data files will be made available as input streams. They do not + need to be downloaded." items: properties: channelName: @@ -182,6 +237,9 @@ spec: type: object type: array outputDataConfig: + description: Specifies the path to the S3 location where you want + to store model artifacts. Amazon SageMaker creates subfolders for + the artifacts. properties: kmsKeyID: type: string @@ -201,6 +259,8 @@ spec: type: string type: object profilerRuleConfigurations: + description: Configuration information for Debugger rules for profiling + system and framework metrics. items: properties: instanceType: @@ -223,6 +283,14 @@ spec: type: object type: array resourceConfig: + description: "The resources, including the ML compute instances and + ML storage volumes, to use for model training. \n ML storage volumes + store model artifacts and incremental states. Training algorithms + might also use ML storage volumes for scratch space. If you want + Amazon SageMaker to use the ML storage volume to store the training + data, choose File as the TrainingInputMode in the algorithm specification. + For distributed training algorithms, specify an instance count greater + than 1." properties: instanceCount: format: int64 @@ -236,8 +304,25 @@ spec: type: integer type: object roleARN: + description: "The Amazon Resource Name (ARN) of an IAM role that Amazon + SageMaker can assume to perform tasks on your behalf. \n During + model training, Amazon SageMaker needs your permission to read input + data from an S3 bucket, download a Docker image that contains training + code, write model artifacts to an S3 bucket, write logs to Amazon + CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant + permissions for all of these tasks to an IAM role. For more information, + see Amazon SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). + \n To be able to pass this role to Amazon SageMaker, the caller + of this API must have the iam:PassRole permission." type: string stoppingCondition: + description: "Specifies a limit to how long a model training job can + run. When the job reaches the time limit, Amazon SageMaker ends + the training job. Use this API to cap model training costs. 
\n To + stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, + which delays job termination for 120 seconds. Algorithms can use + this 120-second window to save the model artifacts, so the results + of training are not lost." properties: maxRuntimeInSeconds: format: int64 @@ -246,15 +331,6 @@ spec: format: int64 type: integer type: object - tags: - items: - properties: - key: - type: string - value: - type: string - type: object - type: array tensorBoardOutputConfig: properties: localPath: @@ -263,8 +339,15 @@ spec: type: string type: object trainingJobName: + description: The name of the training job. The name must be unique + within an AWS Region in an AWS account. type: string vpcConfig: + description: A VpcConfig object that specifies the VPC that you want + your training job to connect to. Control access to and from your + training container by configuring the VPC. For more information, + see Protect Training Jobs by Using an Amazon Virtual Private Cloud + (https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html). properties: securityGroupIDs: items: @@ -343,10 +426,42 @@ spec: type: object type: array failureReason: + description: If the training job failed, the reason it failed. type: string secondaryStatus: + description: "Provides detailed information about the state of the + training job. For detailed information on the secondary status of + the training job, see StatusMessage under SecondaryStatusTransition. + \n Amazon SageMaker provides primary statuses and secondary statuses + that apply to each of them: \n InProgress \n * Starting - Starting + the training job. \n * Downloading - An optional stage for algorithms + that support File training input mode. It indicates that data + is being downloaded to the ML storage volumes. \n * Training + - Training is in progress. \n * Interrupted - The job stopped + because the managed spot training instances were interrupted. + \n * Uploading - Training is complete and the model artifacts + are being uploaded to the S3 location. \n Completed \n * Completed + - The training job has completed. \n Failed \n * Failed - The + training job has failed. The reason for the failure is returned + in the FailureReason field of DescribeTrainingJobResponse. \n Stopped + \n * MaxRuntimeExceeded - The job stopped because it exceeded + the maximum allowed runtime. \n * MaxWaitTimeExceeded - The + job stopped because it exceeded the maximum allowed wait time. + \n * Stopped - The training job has stopped. \n Stopping \n * + Stopping - Stopping the training job. \n Valid values for SecondaryStatus + are subject to change. \n We no longer support the following secondary + statuses: \n * LaunchingMLInstances \n * PreparingTrainingStack + \n * DownloadingTrainingImage" type: string trainingJobStatus: + description: "The status of the training job. \n Amazon SageMaker + provides the following training job statuses: \n * InProgress + - The training is in progress. \n * Completed - The training + job has completed. \n * Failed - The training job has failed. + To see the reason for the failure, see the FailureReason field + in the response to a DescribeTrainingJobResponse call. \n * + Stopping - The training job is stopping. \n * Stopped - The training + job has stopped. \n For more detailed information, see SecondaryStatus." 
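The trainingJobStatus values listed above (InProgress, Completed, Failed, Stopping, Stopped) are what a caller has to interpret when polling DescribeTrainingJob. A minimal sketch of that interpretation, with invented names and using the plain status strings from the description:

package main

import "fmt"

// trainingJobOutcome summarizes a training job's primary status: done reports
// whether the job has reached a final state, and failed reports whether that
// final state was Failed (in which case FailureReason explains why).
func trainingJobOutcome(status string) (done bool, failed bool) {
	switch status {
	case "Completed", "Stopped":
		return true, false
	case "Failed":
		return true, true
	default:
		// InProgress, Stopping, or an unrecognized value: keep polling.
		return false, false
	}
}

func main() {
	done, failed := trainingJobOutcome("Failed")
	fmt.Printf("done=%v failed=%v\n", done, failed) // done=true failed=true
}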
type: string required: - ackResourceMetadata diff --git a/config/crd/bases/sagemaker.services.k8s.aws_transformjobs.yaml b/config/crd/bases/sagemaker.services.k8s.aws_transformjobs.yaml index 806a9d3c..4dd1ce6c 100644 --- a/config/crd/bases/sagemaker.services.k8s.aws_transformjobs.yaml +++ b/config/crd/bases/sagemaker.services.k8s.aws_transformjobs.yaml @@ -44,8 +44,26 @@ spec: description: TransformJobSpec defines the desired state of TransformJob properties: batchStrategy: + description: "Specifies the number of records to include in a mini-batch + for an HTTP inference request. A record is a single unit of input + data that inference can be made on. For example, a single line in + a CSV file is a record. \n To enable the batch strategy, you must + set the SplitType property to Line, RecordIO, or TFRecord. \n To + use only one record when making an HTTP invocation request to a + container, set BatchStrategy to SingleRecord and SplitType to Line. + \n To fit as many records in a mini-batch as can fit within the + MaxPayloadInMB limit, set BatchStrategy to MultiRecord and SplitType + to Line." type: string dataProcessing: + description: The data structure used to specify the data to be used + for inference in a batch transform job and to associate the data + that is relevant to the prediction results in the output. The input + filter provided allows you to exclude input data that is not needed + for inference in a batch transform job. The output filter provided + allows you to include input data relevant to interpreting the predictions + in the output from the job. For more information, see Associate + Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). properties: inputFilter: type: string @@ -57,6 +75,8 @@ spec: environment: additionalProperties: type: string + description: The environment variables to set in the Docker container. + We support up to 16 key and values entries in the map. type: object experimentConfig: properties: @@ -68,12 +88,33 @@ spec: type: string type: object maxConcurrentTransforms: + description: The maximum number of parallel requests that can be sent + to each instance in a transform job. If MaxConcurrentTransforms + is set to 0 or left unset, Amazon SageMaker checks the optional + execution-parameters to determine the settings for your chosen algorithm. + If the execution-parameters endpoint is not enabled, the default + value is 1. For more information on execution-parameters, see How + Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). + For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms. format: int64 type: integer maxPayloadInMB: + description: "The maximum allowed size of the payload, in MB. A payload + is the data portion of a record (without metadata). The value in + MaxPayloadInMB must be greater than, or equal to, the size of a + single record. To estimate the size of a record in MB, divide the + size of your dataset by the number of records. To ensure that the + records fit within the maximum payload size, we recommend using + a slightly larger value. The default value is 6 MB. \n For cases + where the payload might be arbitrarily large and is transmitted + using HTTP chunked encoding, set the value to 0. This feature works + only in supported algorithms. 
Currently, Amazon SageMaker built-in + algorithms do not support HTTP chunked encoding." format: int64 type: integer modelClientConfig: + description: Configures the timeout and maximum number of retries + for processing a transform job invocation. properties: invocationsMaxRetries: format: int64 @@ -83,17 +124,13 @@ spec: type: integer type: object modelName: + description: The name of the model that you want to use for the transform + job. ModelName must be the name of an existing Amazon SageMaker + model within an AWS Region in an AWS account. type: string - tags: - items: - properties: - key: - type: string - value: - type: string - type: object - type: array transformInput: + description: Describes the input source and the way the transform + job consumes it. properties: compressionType: type: string @@ -113,8 +150,11 @@ spec: type: string type: object transformJobName: + description: The name of the transform job. The name must be unique + within an AWS Region in an AWS account. type: string transformOutput: + description: Describes the results of the transform job. properties: accept: type: string @@ -126,6 +166,8 @@ spec: type: string type: object transformResources: + description: Describes the resources, including ML instance types + and ML instance count, to use for the transform job. properties: instanceCount: format: int64 @@ -202,8 +244,14 @@ spec: type: object type: array failureReason: + description: If the transform job failed, FailureReason describes + why it failed. A transform job creates a log file, which includes + error messages, and stores it as an Amazon S3 object. For more information, + see Log Amazon SageMaker Events with Amazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). type: string transformJobStatus: + description: The status of the transform job. If the transform job + failed, the reason is returned in the FailureReason field. 
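The maxPayloadInMB guidance above estimates record size as dataset size divided by record count, then recommends choosing a slightly larger payload limit. Worked out as a small sketch (the helper name and the 10% head-room figure are assumptions made for this example; leaving the field unset keeps the documented 6 MB default):

package main

import (
	"fmt"
	"math"
)

// suggestMaxPayloadInMB derives a MaxPayloadInMB value from the total dataset
// size in bytes and the number of records, following the field description:
// average record size in MB, a little head room, rounded up to a whole MB.
func suggestMaxPayloadInMB(datasetBytes, recordCount int64) int64 {
	if recordCount <= 0 {
		return 6 // documented default when no better estimate is available
	}
	avgRecordMB := float64(datasetBytes) / float64(recordCount) / (1024 * 1024)
	suggested := int64(math.Ceil(avgRecordMB * 1.1)) // 10% head room, arbitrary for this sketch
	if suggested < 1 {
		suggested = 1
	}
	return suggested
}

func main() {
	// A 2 GiB dataset with 100,000 records averages ~0.02 MB per record,
	// so 1 MB is already comfortably above the per-record size.
	fmt.Println(suggestMaxPayloadInMB(2<<30, 100000))
}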
type: string required: - ackResourceMetadata diff --git a/generator.yaml b/generator.yaml index 6daa7a51..6113dcf3 100644 --- a/generator.yaml +++ b/generator.yaml @@ -15,6 +15,8 @@ operations: UpdateEndpoint: custom_implementation: customUpdateEndpoint set_output_custom_method_name: customUpdateEndpointSetOutput + override_values: + RetainAllVariantProperties: true DeleteEndpoint: custom_implementation: customDeleteEndpoint StopHyperParameterTuningJob: @@ -53,26 +55,31 @@ resources: from: operation: DescribeEndpoint path: FailureReason - EndpointConfigName: + LatestEndpointConfigName: is_read_only: true from: operation: DescribeEndpoint path: EndpointConfigName - # CreationTime: - # is_read_only: true - # from: - # operation: DescribeEndpoint - # path: CreationTime - # LastModifiedTime: - # is_read_only: true - # from: - # operation: DescribeEndpoint - # path: LastModifiedTime - # ProductionVariants: - # is_read_only: true - # from: - # operation: DescribeEndpoint - # path: ProductionVariants + LastEndpointConfigNameForUpdate: + is_read_only: true + from: + operation: DescribeEndpointConfig + path: EndpointConfigName + CreationTime: + is_read_only: true + from: + operation: DescribeEndpoint + path: CreationTime + LastModifiedTime: + is_read_only: true + from: + operation: DescribeEndpoint + path: LastModifiedTime + ProductionVariants: + is_read_only: true + from: + operation: DescribeEndpoint + path: ProductionVariants TrainingJob: exceptions: errors: @@ -211,3 +218,5 @@ ignore: - Workteam operations: - UpdateTrainingJob + shape_names: + - TagList diff --git a/go.mod b/go.mod index c224891d..5b6a1185 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aws-controllers-k8s/sagemaker-controller go 1.14 require ( - github.com/aws-controllers-k8s/runtime v0.0.4-0.20210224005142-96355b6c73d7 + github.com/aws-controllers-k8s/runtime v0.0.5 github.com/aws/aws-sdk-go v1.37.31 github.com/go-logr/logr v0.1.0 github.com/google/go-cmp v0.3.1 diff --git a/go.sum b/go.sum index 1e9793f2..04a7346b 100644 --- a/go.sum +++ b/go.sum @@ -25,6 +25,8 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws-controllers-k8s/runtime v0.0.4-0.20210224005142-96355b6c73d7 h1:HDJnBJ+JGqtWFKqfSHbUV60QPLxpsyPo+OkET7RMULI= github.com/aws-controllers-k8s/runtime v0.0.4-0.20210224005142-96355b6c73d7/go.mod h1:xA2F18PJerBHaqrS4de1lpP7skeSMeStkmh+3x5sWvw= +github.com/aws-controllers-k8s/runtime v0.0.5 h1:WdcnMNdgagF2MMPQRbDJ5OEzMMgHraCJqvvFj4Sx/5g= +github.com/aws-controllers-k8s/runtime v0.0.5/go.mod h1:xA2F18PJerBHaqrS4de1lpP7skeSMeStkmh+3x5sWvw= github.com/aws/aws-sdk-go v1.37.4 h1:tWxrpMK/oRSXVnjUzhGeCWLR00fW0WF4V4sycYPPrJ8= github.com/aws/aws-sdk-go v1.37.4/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.37.31 h1:eK7hgg1H4xivwopAbnzfQ7ZBbDb9cEkGDivd9rUMnJs= diff --git a/pkg/resource/endpoint/custom_set_output.go b/pkg/resource/endpoint/custom_set_output.go index d1c925c0..e25f713f 100644 --- a/pkg/resource/endpoint/custom_set_output.go +++ b/pkg/resource/endpoint/custom_set_output.go @@ -47,6 +47,19 @@ func (rm *resourceManager) customDescribeEndpointSetOutput( ko *svcapitypes.Endpoint, ) (*svcapitypes.Endpoint, error) { rm.customSetOutput(r, resp.EndpointStatus, ko) + // Workaround: Field config for LatestEndpointConfigName of generator config + // does not code generate this correctly since this field is 
part of Spec + // SageMaker users will need following information: + // - latestEndpointConfig + // - desiredEndpointConfig + // - LastEndpointConfigNameForUpdate + // - FailureReason + // to determine the correct course of action in case of update to Endpoint fails + if resp.EndpointConfigName != nil { + ko.Status.LatestEndpointConfigName = resp.EndpointConfigName + } else { + ko.Status.LatestEndpointConfigName = nil + } return ko, nil } @@ -59,6 +72,8 @@ func (rm *resourceManager) customUpdateEndpointSetOutput( ko *svcapitypes.Endpoint, ) (*svcapitypes.Endpoint, error) { rm.customSetOutput(r, aws.String(svcsdk.EndpointStatusUpdating), ko) + // no nil check present here since Spec.EndpointConfigName is a required field + ko.Status.LastEndpointConfigNameForUpdate = r.ko.Spec.EndpointConfigName return ko, nil } diff --git a/pkg/resource/endpoint/custom_update_api.go b/pkg/resource/endpoint/custom_update_api.go index 8654dd6a..1751b3f1 100644 --- a/pkg/resource/endpoint/custom_update_api.go +++ b/pkg/resource/endpoint/custom_update_api.go @@ -19,21 +19,73 @@ package endpoint import ( "context" "errors" + "fmt" + "strings" ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" "github.com/aws-controllers-k8s/runtime/pkg/requeue" svcsdk "github.com/aws/aws-sdk-go/service/sagemaker" ) -// customUpdateEndpoint adds specialized logic to requeueAfter until endpoint is in -// InService state before an updateEndpoint can be called +var ( + FailUpdateError = fmt.Errorf("Unable to update Endpoint. Check FailureReason") + + FailureReasonInternalServiceErrorPrefix = "Request to service failed" +) + +// customUpdateEndpoint adds specialized logic to check if controller should +// proceeed with updateEndpoint call. +// Update is blocked in the following cases: +// 1. while EndpointStatus != InService +// 2. EndpointStatus == Failed +// 3. A previous update to the Endpoint with same endpointConfigName failed +// Method returns nil if endpoint can be updated, otherwise error depending on above cases func (rm *resourceManager) customUpdateEndpoint( ctx context.Context, desired *resource, latest *resource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (*resource, error) { - return nil, rm.endpointStatusAllowUpdates(ctx, latest) + latestStatus := latest.ko.Status.EndpointStatus + if latestStatus == nil { + return nil, nil + } + + if *latestStatus != svcsdk.EndpointStatusFailed { + // Case 1 - requeueAfter until endpoint is in InService state + err := rm.endpointStatusAllowUpdates(ctx, latest) + if err != nil { + return nil, err + } + } + + failureReason := latest.ko.Status.FailureReason + latestEndpointConfig := latest.ko.Spec.EndpointConfigName + desiredEndpointConfig := desired.ko.Spec.EndpointConfigName + lastEndpointConfigForUpdate := desired.ko.Status.LastEndpointConfigNameForUpdate + + // Case 2 - EndpointStatus == Failed + if *latestStatus == svcsdk.EndpointStatusFailed || + // Case 3 - A previous update to the Endpoint with same endpointConfigName failed + // Following checks indicate FailureReason is related to a failed update + // Note: Internal service error is an exception for this case + // "Request to service failed" means update failed because of ISE and can be retried + (failureReason != nil && lastEndpointConfigForUpdate != nil && + !strings.HasPrefix(*failureReason, FailureReasonInternalServiceErrorPrefix) && + *desiredEndpointConfig != *latestEndpointConfig && + *desiredEndpointConfig == *lastEndpointConfigForUpdate) { + // 1. 
FailureReason alone does mean an update failed it can appear because of other reasons(patching/scaling failed) + // 2. *desiredEndpointConfig == *lastEndpointConfigForUpdate only tells us an update was tried with lastEndpointConfigForUpdate + // but does not tell us anything if the update was successful or not in the past because it is set if updateEndpoint returns 200 (aync operation). + // 3. Now, sdkUpdate can execute because of change in any field in Spec (like tags/deploymentConfig in future) + + // 1 & 2 does not guarantee an update Failed. Hence we need to look at `*latestEndpointConfigName` to determine if the update was unsuccessful + // `*desiredEndpointConfig != *latestEndpointConfig` + `*desiredEndpointConfig == *lastEndpointConfigForUpdate`+ `FailureReason != nil` indicate that an update is needed, + // has already been tried and failed. + return nil, FailUpdateError + } + + return nil, nil } // customDeleteEndpoint adds specialized logic to requeueAfter until endpoint is in diff --git a/pkg/resource/endpoint/custom_update_conditions.go b/pkg/resource/endpoint/custom_update_conditions.go index 728a4d2e..18c4f656 100644 --- a/pkg/resource/endpoint/custom_update_conditions.go +++ b/pkg/resource/endpoint/custom_update_conditions.go @@ -33,7 +33,9 @@ func (rm *resourceManager) customUpdateConditions( err error, ) bool { latestStatus := r.ko.Status.EndpointStatus - if latestStatus == nil || *latestStatus != svcsdk.EndpointStatusFailed { + failureReason := r.ko.Status.FailureReason + + if latestStatus == nil || failureReason == nil { return false } var terminalCondition *ackv1alpha1.Condition = nil @@ -51,20 +53,23 @@ func (rm *resourceManager) customUpdateConditions( return false } } - if terminalCondition == nil { - terminalCondition = &ackv1alpha1.Condition{ - Type: ackv1alpha1.ConditionTypeTerminal, - Status: corev1.ConditionTrue, - Reason: aws.String("Endpoint status: Failed. Cannot be updated"), - } - failureReason := r.ko.Status.FailureReason - if failureReason != nil { - terminalCondition.Message = failureReason + if (err != nil && err == FailUpdateError) || (latestStatus != nil && *latestStatus == svcsdk.EndpointStatusFailed) { + // setting terminal condition since controller can no longer recover by retrying + if terminalCondition == nil { + terminalCondition = &ackv1alpha1.Condition{ + Type: ackv1alpha1.ConditionTypeTerminal, + } + ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) } - - ko.Status.Conditions = append(ko.Status.Conditions, terminalCondition) - + terminalCondition.Status = corev1.ConditionTrue + if *latestStatus == svcsdk.EndpointStatusFailed { + terminalCondition.Message = aws.String("Cannot update endpoint with Failed status") + } else { + terminalCondition.Message = aws.String(FailUpdateError.Error()) + } + return true } - return true + + return false } diff --git a/pkg/resource/endpoint/delta.go b/pkg/resource/endpoint/delta.go new file mode 100644 index 00000000..2c214320 --- /dev/null +++ b/pkg/resource/endpoint/delta.go @@ -0,0 +1,51 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package endpoint + +import ( + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.EndpointConfigName, b.ko.Spec.EndpointConfigName) { + delta.Add("Spec.EndpointConfigName", a.ko.Spec.EndpointConfigName, b.ko.Spec.EndpointConfigName) + } else if a.ko.Spec.EndpointConfigName != nil && b.ko.Spec.EndpointConfigName != nil { + if *a.ko.Spec.EndpointConfigName != *b.ko.Spec.EndpointConfigName { + delta.Add("Spec.EndpointConfigName", a.ko.Spec.EndpointConfigName, b.ko.Spec.EndpointConfigName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.EndpointName, b.ko.Spec.EndpointName) { + delta.Add("Spec.EndpointName", a.ko.Spec.EndpointName, b.ko.Spec.EndpointName) + } else if a.ko.Spec.EndpointName != nil && b.ko.Spec.EndpointName != nil { + if *a.ko.Spec.EndpointName != *b.ko.Spec.EndpointName { + delta.Add("Spec.EndpointName", a.ko.Spec.EndpointName, b.ko.Spec.EndpointName) + } + } + + return delta +} diff --git a/pkg/resource/endpoint/descriptor.go b/pkg/resource/endpoint/descriptor.go index e9d28d2d..3948fd07 100644 --- a/pkg/resource/endpoint/descriptor.go +++ b/pkg/resource/endpoint/descriptor.go @@ -18,8 +18,6 @@ package endpoint import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sapirt "k8s.io/apimachinery/pkg/runtime" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -65,37 +63,10 @@ func (d *resourceDescriptor) ResourceFromRuntimeObject( } } -// Equal returns true if the two supplied AWSResources have the same content. -// The underlying types of the two supplied AWSResources should be the same. In -// other words, the Equal() method should be called with the same concrete -// implementing AWSResource type -func (d *resourceDescriptor) Equal( - a acktypes.AWSResource, - b acktypes.AWSResource, -) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - return cmp.Equal(ac.ko, bc.ko, opts...) -} - -// Diff returns a Reporter which provides the difference between two supplied -// AWSResources. The underlying types of the two supplied AWSResources should -// be the same. In other words, the Diff() method should be called with the -// same concrete implementing AWSResource type -func (d *resourceDescriptor) Diff( - a acktypes.AWSResource, - b acktypes.AWSResource, -) *ackcompare.Reporter { - ac := a.(*resource) - bc := b.(*resource) - var diffReporter ackcompare.Reporter - opts := []cmp.Option{ - cmp.Reporter(&diffReporter), - cmp.AllowUnexported(svcapitypes.Endpoint{}), - } - cmp.Equal(ac.ko, bc.ko, opts...) - return &diffReporter +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. 
+func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) } // UpdateCRStatus accepts an AWSResource object and changes the Status diff --git a/pkg/resource/endpoint/manager.go b/pkg/resource/endpoint/manager.go index 60632779..638ff99c 100644 --- a/pkg/resource/endpoint/manager.go +++ b/pkg/resource/endpoint/manager.go @@ -125,7 +125,7 @@ func (rm *resourceManager) Update( ctx context.Context, resDesired acktypes.AWSResource, resLatest acktypes.AWSResource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (acktypes.AWSResource, error) { desired := rm.concreteResource(resDesired) latest := rm.concreteResource(resLatest) @@ -133,7 +133,7 @@ func (rm *resourceManager) Update( // Should never happen... if it does, it's buggy code. panic("resource manager's Update() method received resource with nil CR object") } - updated, err := rm.sdkUpdate(ctx, desired, latest, diffReporter) + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) if err != nil { return rm.onError(latest, err) } diff --git a/pkg/resource/endpoint/resource.go b/pkg/resource/endpoint/resource.go index 3b438282..c413228f 100644 --- a/pkg/resource/endpoint/resource.go +++ b/pkg/resource/endpoint/resource.go @@ -24,7 +24,7 @@ import ( svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1" ) -// resource implements the `aws-service-operator-k8s/pkg/types.AWSResource` +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` // interface type resource struct { // The Kubernetes-native CR representing the resource diff --git a/pkg/resource/endpoint/sdk.go b/pkg/resource/endpoint/sdk.go index 5cccd3f4..cb42c774 100644 --- a/pkg/resource/endpoint/sdk.go +++ b/pkg/resource/endpoint/sdk.go @@ -71,6 +71,11 @@ func (rm *resourceManager) sdkFind( // the original Kubernetes object we passed to the function ko := r.ko.DeepCopy() + if resp.CreationTime != nil { + ko.Status.CreationTime = &metav1.Time{*resp.CreationTime} + } else { + ko.Status.CreationTime = nil + } if ko.Status.ACKResourceMetadata == nil { ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} } @@ -80,15 +85,70 @@ func (rm *resourceManager) sdkFind( } if resp.EndpointConfigName != nil { ko.Spec.EndpointConfigName = resp.EndpointConfigName + } else { + ko.Spec.EndpointConfigName = nil } if resp.EndpointName != nil { ko.Spec.EndpointName = resp.EndpointName + } else { + ko.Spec.EndpointName = nil } if resp.EndpointStatus != nil { ko.Status.EndpointStatus = resp.EndpointStatus + } else { + ko.Status.EndpointStatus = nil } if resp.FailureReason != nil { ko.Status.FailureReason = resp.FailureReason + } else { + ko.Status.FailureReason = nil + } + if resp.LastModifiedTime != nil { + ko.Status.LastModifiedTime = &metav1.Time{*resp.LastModifiedTime} + } else { + ko.Status.LastModifiedTime = nil + } + if resp.ProductionVariants != nil { + f9 := []*svcapitypes.ProductionVariantSummary{} + for _, f9iter := range resp.ProductionVariants { + f9elem := &svcapitypes.ProductionVariantSummary{} + if f9iter.CurrentInstanceCount != nil { + f9elem.CurrentInstanceCount = f9iter.CurrentInstanceCount + } + if f9iter.CurrentWeight != nil { + f9elem.CurrentWeight = f9iter.CurrentWeight + } + if f9iter.DeployedImages != nil { + f9elemf2 := []*svcapitypes.DeployedImage{} + for _, f9elemf2iter := range f9iter.DeployedImages { + f9elemf2elem := &svcapitypes.DeployedImage{} + if f9elemf2iter.ResolutionTime != nil { + f9elemf2elem.ResolutionTime = 
&metav1.Time{*f9elemf2iter.ResolutionTime} + } + if f9elemf2iter.ResolvedImage != nil { + f9elemf2elem.ResolvedImage = f9elemf2iter.ResolvedImage + } + if f9elemf2iter.SpecifiedImage != nil { + f9elemf2elem.SpecifiedImage = f9elemf2iter.SpecifiedImage + } + f9elemf2 = append(f9elemf2, f9elemf2elem) + } + f9elem.DeployedImages = f9elemf2 + } + if f9iter.DesiredInstanceCount != nil { + f9elem.DesiredInstanceCount = f9iter.DesiredInstanceCount + } + if f9iter.DesiredWeight != nil { + f9elem.DesiredWeight = f9iter.DesiredWeight + } + if f9iter.VariantName != nil { + f9elem.VariantName = f9iter.VariantName + } + f9 = append(f9, f9elem) + } + ko.Status.ProductionVariants = f9 + } else { + ko.Status.ProductionVariants = nil } rm.setStatusDefaults(ko) @@ -179,20 +239,6 @@ func (rm *resourceManager) newCreateRequestPayload( if r.ko.Spec.EndpointName != nil { res.SetEndpointName(*r.ko.Spec.EndpointName) } - if r.ko.Spec.Tags != nil { - f2 := []*svcsdk.Tag{} - for _, f2iter := range r.ko.Spec.Tags { - f2elem := &svcsdk.Tag{} - if f2iter.Key != nil { - f2elem.SetKey(*f2iter.Key) - } - if f2iter.Value != nil { - f2elem.SetValue(*f2iter.Value) - } - f2 = append(f2, f2elem) - } - res.SetTags(f2) - } return res, nil } @@ -203,10 +249,10 @@ func (rm *resourceManager) sdkUpdate( ctx context.Context, desired *resource, latest *resource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (*resource, error) { - customResp, customRespErr := rm.customUpdateEndpoint(ctx, desired, latest, diffReporter) + customResp, customRespErr := rm.customUpdateEndpoint(ctx, desired, latest, delta) if customResp != nil || customRespErr != nil { return customResp, customRespErr } @@ -258,6 +304,7 @@ func (rm *resourceManager) newUpdateRequestPayload( if r.ko.Spec.EndpointName != nil { res.SetEndpointName(*r.ko.Spec.EndpointName) } + res.SetRetainAllVariantProperties(true) return res, nil } diff --git a/pkg/resource/endpoint_config/delta.go b/pkg/resource/endpoint_config/delta.go new file mode 100644 index 00000000..a5c18af9 --- /dev/null +++ b/pkg/resource/endpoint_config/delta.go @@ -0,0 +1,96 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package endpoint_config + +import ( + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.DataCaptureConfig, b.ko.Spec.DataCaptureConfig) { + delta.Add("Spec.DataCaptureConfig", a.ko.Spec.DataCaptureConfig, b.ko.Spec.DataCaptureConfig) + } else if a.ko.Spec.DataCaptureConfig != nil && b.ko.Spec.DataCaptureConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader, b.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader) { + delta.Add("Spec.DataCaptureConfig.CaptureContentTypeHeader", a.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader, b.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader) + } else if a.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader != nil && b.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader != nil { + + if !ackcompare.SliceStringPEqual(a.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.CsvContentTypes, b.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.CsvContentTypes) { + delta.Add("Spec.DataCaptureConfig.CaptureContentTypeHeader.CsvContentTypes", a.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.CsvContentTypes, b.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.CsvContentTypes) + } + + if !ackcompare.SliceStringPEqual(a.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.JSONContentTypes, b.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.JSONContentTypes) { + delta.Add("Spec.DataCaptureConfig.CaptureContentTypeHeader.JSONContentTypes", a.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.JSONContentTypes, b.ko.Spec.DataCaptureConfig.CaptureContentTypeHeader.JSONContentTypes) + } + } + + if ackcompare.HasNilDifference(a.ko.Spec.DataCaptureConfig.DestinationS3URI, b.ko.Spec.DataCaptureConfig.DestinationS3URI) { + delta.Add("Spec.DataCaptureConfig.DestinationS3URI", a.ko.Spec.DataCaptureConfig.DestinationS3URI, b.ko.Spec.DataCaptureConfig.DestinationS3URI) + } else if a.ko.Spec.DataCaptureConfig.DestinationS3URI != nil && b.ko.Spec.DataCaptureConfig.DestinationS3URI != nil { + if *a.ko.Spec.DataCaptureConfig.DestinationS3URI != *b.ko.Spec.DataCaptureConfig.DestinationS3URI { + delta.Add("Spec.DataCaptureConfig.DestinationS3URI", a.ko.Spec.DataCaptureConfig.DestinationS3URI, b.ko.Spec.DataCaptureConfig.DestinationS3URI) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DataCaptureConfig.EnableCapture, b.ko.Spec.DataCaptureConfig.EnableCapture) { + delta.Add("Spec.DataCaptureConfig.EnableCapture", a.ko.Spec.DataCaptureConfig.EnableCapture, b.ko.Spec.DataCaptureConfig.EnableCapture) + } else if a.ko.Spec.DataCaptureConfig.EnableCapture != nil && b.ko.Spec.DataCaptureConfig.EnableCapture != nil { + if *a.ko.Spec.DataCaptureConfig.EnableCapture != *b.ko.Spec.DataCaptureConfig.EnableCapture { + delta.Add("Spec.DataCaptureConfig.EnableCapture", a.ko.Spec.DataCaptureConfig.EnableCapture, b.ko.Spec.DataCaptureConfig.EnableCapture) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DataCaptureConfig.InitialSamplingPercentage, b.ko.Spec.DataCaptureConfig.InitialSamplingPercentage) { + delta.Add("Spec.DataCaptureConfig.InitialSamplingPercentage", a.ko.Spec.DataCaptureConfig.InitialSamplingPercentage, b.ko.Spec.DataCaptureConfig.InitialSamplingPercentage) + } else if 
a.ko.Spec.DataCaptureConfig.InitialSamplingPercentage != nil && b.ko.Spec.DataCaptureConfig.InitialSamplingPercentage != nil { + if *a.ko.Spec.DataCaptureConfig.InitialSamplingPercentage != *b.ko.Spec.DataCaptureConfig.InitialSamplingPercentage { + delta.Add("Spec.DataCaptureConfig.InitialSamplingPercentage", a.ko.Spec.DataCaptureConfig.InitialSamplingPercentage, b.ko.Spec.DataCaptureConfig.InitialSamplingPercentage) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DataCaptureConfig.KMSKeyID, b.ko.Spec.DataCaptureConfig.KMSKeyID) { + delta.Add("Spec.DataCaptureConfig.KMSKeyID", a.ko.Spec.DataCaptureConfig.KMSKeyID, b.ko.Spec.DataCaptureConfig.KMSKeyID) + } else if a.ko.Spec.DataCaptureConfig.KMSKeyID != nil && b.ko.Spec.DataCaptureConfig.KMSKeyID != nil { + if *a.ko.Spec.DataCaptureConfig.KMSKeyID != *b.ko.Spec.DataCaptureConfig.KMSKeyID { + delta.Add("Spec.DataCaptureConfig.KMSKeyID", a.ko.Spec.DataCaptureConfig.KMSKeyID, b.ko.Spec.DataCaptureConfig.KMSKeyID) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.EndpointConfigName, b.ko.Spec.EndpointConfigName) { + delta.Add("Spec.EndpointConfigName", a.ko.Spec.EndpointConfigName, b.ko.Spec.EndpointConfigName) + } else if a.ko.Spec.EndpointConfigName != nil && b.ko.Spec.EndpointConfigName != nil { + if *a.ko.Spec.EndpointConfigName != *b.ko.Spec.EndpointConfigName { + delta.Add("Spec.EndpointConfigName", a.ko.Spec.EndpointConfigName, b.ko.Spec.EndpointConfigName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) { + delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) + } else if a.ko.Spec.KMSKeyID != nil && b.ko.Spec.KMSKeyID != nil { + if *a.ko.Spec.KMSKeyID != *b.ko.Spec.KMSKeyID { + delta.Add("Spec.KMSKeyID", a.ko.Spec.KMSKeyID, b.ko.Spec.KMSKeyID) + } + } + + return delta +} diff --git a/pkg/resource/endpoint_config/descriptor.go b/pkg/resource/endpoint_config/descriptor.go index 684c96e5..383f74e7 100644 --- a/pkg/resource/endpoint_config/descriptor.go +++ b/pkg/resource/endpoint_config/descriptor.go @@ -18,8 +18,6 @@ package endpoint_config import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sapirt "k8s.io/apimachinery/pkg/runtime" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -65,37 +63,10 @@ func (d *resourceDescriptor) ResourceFromRuntimeObject( } } -// Equal returns true if the two supplied AWSResources have the same content. -// The underlying types of the two supplied AWSResources should be the same. In -// other words, the Equal() method should be called with the same concrete -// implementing AWSResource type -func (d *resourceDescriptor) Equal( - a acktypes.AWSResource, - b acktypes.AWSResource, -) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - return cmp.Equal(ac.ko, bc.ko, opts...) -} - -// Diff returns a Reporter which provides the difference between two supplied -// AWSResources. The underlying types of the two supplied AWSResources should -// be the same. 
In other words, the Diff() method should be called with the -// same concrete implementing AWSResource type -func (d *resourceDescriptor) Diff( - a acktypes.AWSResource, - b acktypes.AWSResource, -) *ackcompare.Reporter { - ac := a.(*resource) - bc := b.(*resource) - var diffReporter ackcompare.Reporter - opts := []cmp.Option{ - cmp.Reporter(&diffReporter), - cmp.AllowUnexported(svcapitypes.EndpointConfig{}), - } - cmp.Equal(ac.ko, bc.ko, opts...) - return &diffReporter +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) } // UpdateCRStatus accepts an AWSResource object and changes the Status diff --git a/pkg/resource/endpoint_config/manager.go b/pkg/resource/endpoint_config/manager.go index e743688c..3c77674f 100644 --- a/pkg/resource/endpoint_config/manager.go +++ b/pkg/resource/endpoint_config/manager.go @@ -125,7 +125,7 @@ func (rm *resourceManager) Update( ctx context.Context, resDesired acktypes.AWSResource, resLatest acktypes.AWSResource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (acktypes.AWSResource, error) { desired := rm.concreteResource(resDesired) latest := rm.concreteResource(resLatest) @@ -133,7 +133,7 @@ func (rm *resourceManager) Update( // Should never happen... if it does, it's buggy code. panic("resource manager's Update() method received resource with nil CR object") } - updated, err := rm.sdkUpdate(ctx, desired, latest, diffReporter) + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) if err != nil { return rm.onError(latest, err) } diff --git a/pkg/resource/endpoint_config/resource.go b/pkg/resource/endpoint_config/resource.go index 3061a142..1ca8e01f 100644 --- a/pkg/resource/endpoint_config/resource.go +++ b/pkg/resource/endpoint_config/resource.go @@ -24,7 +24,7 @@ import ( svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1" ) -// resource implements the `aws-service-operator-k8s/pkg/types.AWSResource` +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` // interface type resource struct { // The Kubernetes-native CR representing the resource diff --git a/pkg/resource/endpoint_config/sdk.go b/pkg/resource/endpoint_config/sdk.go index 5ad05e5c..62509400 100644 --- a/pkg/resource/endpoint_config/sdk.go +++ b/pkg/resource/endpoint_config/sdk.go @@ -119,6 +119,8 @@ func (rm *resourceManager) sdkFind( f1.KMSKeyID = resp.DataCaptureConfig.KmsKeyId } ko.Spec.DataCaptureConfig = f1 + } else { + ko.Spec.DataCaptureConfig = nil } if ko.Status.ACKResourceMetadata == nil { ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} @@ -129,9 +131,13 @@ func (rm *resourceManager) sdkFind( } if resp.EndpointConfigName != nil { ko.Spec.EndpointConfigName = resp.EndpointConfigName + } else { + ko.Spec.EndpointConfigName = nil } if resp.KmsKeyId != nil { ko.Spec.KMSKeyID = resp.KmsKeyId + } else { + ko.Spec.KMSKeyID = nil } if resp.ProductionVariants != nil { f5 := []*svcapitypes.ProductionVariant{} @@ -168,6 +174,8 @@ func (rm *resourceManager) sdkFind( f5 = append(f5, f5elem) } ko.Spec.ProductionVariants = f5 + } else { + ko.Spec.ProductionVariants = nil } rm.setStatusDefaults(ko) @@ -331,20 +339,6 @@ func (rm *resourceManager) newCreateRequestPayload( } res.SetProductionVariants(f3) } - if r.ko.Spec.Tags != nil { - f4 := []*svcsdk.Tag{} - for _, f4iter := range r.ko.Spec.Tags { - f4elem := 
&svcsdk.Tag{} - if f4iter.Key != nil { - f4elem.SetKey(*f4iter.Key) - } - if f4iter.Value != nil { - f4elem.SetValue(*f4iter.Value) - } - f4 = append(f4, f4elem) - } - res.SetTags(f4) - } return res, nil } @@ -355,7 +349,7 @@ func (rm *resourceManager) sdkUpdate( ctx context.Context, desired *resource, latest *resource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (*resource, error) { // TODO(jaypipes): Figure this out... return nil, ackerr.NotImplemented diff --git a/pkg/resource/hyper_parameter_tuning_job/delta.go b/pkg/resource/hyper_parameter_tuning_job/delta.go new file mode 100644 index 00000000..c58ac0e2 --- /dev/null +++ b/pkg/resource/hyper_parameter_tuning_job/delta.go @@ -0,0 +1,321 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package hyper_parameter_tuning_job + +import ( + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig, b.ko.Spec.HyperParameterTuningJobConfig) { + delta.Add("Spec.HyperParameterTuningJobConfig", a.ko.Spec.HyperParameterTuningJobConfig, b.ko.Spec.HyperParameterTuningJobConfig) + } else if a.ko.Spec.HyperParameterTuningJobConfig != nil && b.ko.Spec.HyperParameterTuningJobConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective) { + delta.Add("Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective) + } else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective != nil { + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName) { + delta.Add("Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName) + } else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != nil { + if *a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName != 
*b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName { + delta.Add("Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.MetricName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type) { + delta.Add("Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type) + } else if a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != nil && b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != nil { + if *a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type != *b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type { + delta.Add("Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type", a.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type, b.ko.Spec.HyperParameterTuningJobConfig.HyperParameterTuningJobObjective.Type) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges) { + delta.Add("Spec.HyperParameterTuningJobConfig.ParameterRanges", a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges, b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges) + } else if a.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges != nil && b.ko.Spec.HyperParameterTuningJobConfig.ParameterRanges != nil { + + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits) { + delta.Add("Spec.HyperParameterTuningJobConfig.ResourceLimits", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits) + } else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits != nil { + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs) { + delta.Add("Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs) + } else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != nil { + if *a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs != *b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs { + delta.Add("Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxNumberOfTrainingJobs) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, 
b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs) { + delta.Add("Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs) + } else if a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != nil && b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != nil { + if *a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs != *b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs { + delta.Add("Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs", a.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs, b.ko.Spec.HyperParameterTuningJobConfig.ResourceLimits.MaxParallelTrainingJobs) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy) { + delta.Add("Spec.HyperParameterTuningJobConfig.Strategy", a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy) + } else if a.ko.Spec.HyperParameterTuningJobConfig.Strategy != nil && b.ko.Spec.HyperParameterTuningJobConfig.Strategy != nil { + if *a.ko.Spec.HyperParameterTuningJobConfig.Strategy != *b.ko.Spec.HyperParameterTuningJobConfig.Strategy { + delta.Add("Spec.HyperParameterTuningJobConfig.Strategy", a.ko.Spec.HyperParameterTuningJobConfig.Strategy, b.ko.Spec.HyperParameterTuningJobConfig.Strategy) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType) { + delta.Add("Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType", a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType) + } else if a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != nil && b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != nil { + if *a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType != *b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType { + delta.Add("Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType", a.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType, b.ko.Spec.HyperParameterTuningJobConfig.TrainingJobEarlyStoppingType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria) { + delta.Add("Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria", a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria) + } else if a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria != nil && b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria != nil { + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue) { + delta.Add("Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue", 
a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue) + } else if a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != nil && b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != nil { + if *a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue != *b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue { + delta.Add("Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue", a.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue, b.ko.Spec.HyperParameterTuningJobConfig.TuningJobCompletionCriteria.TargetObjectiveMetricValue) + } + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName) { + delta.Add("Spec.HyperParameterTuningJobName", a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName) + } else if a.ko.Spec.HyperParameterTuningJobName != nil && b.ko.Spec.HyperParameterTuningJobName != nil { + if *a.ko.Spec.HyperParameterTuningJobName != *b.ko.Spec.HyperParameterTuningJobName { + delta.Add("Spec.HyperParameterTuningJobName", a.ko.Spec.HyperParameterTuningJobName, b.ko.Spec.HyperParameterTuningJobName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition, b.ko.Spec.TrainingJobDefinition) { + delta.Add("Spec.TrainingJobDefinition", a.ko.Spec.TrainingJobDefinition, b.ko.Spec.TrainingJobDefinition) + } else if a.ko.Spec.TrainingJobDefinition != nil && b.ko.Spec.TrainingJobDefinition != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification) { + delta.Add("Spec.TrainingJobDefinition.AlgorithmSpecification", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification) + } else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName) { + delta.Add("Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName) + } else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != nil { + if *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName { + delta.Add("Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName) + } + } + + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage) { + delta.Add("Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, 
b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage) + } else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != nil { + if *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage { + delta.Add("Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode) { + delta.Add("Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode) + } else if a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != nil && b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != nil { + if *a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != *b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode { + delta.Add("Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode", a.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig, b.ko.Spec.TrainingJobDefinition.CheckpointConfig) { + delta.Add("Spec.TrainingJobDefinition.CheckpointConfig", a.ko.Spec.TrainingJobDefinition.CheckpointConfig, b.ko.Spec.TrainingJobDefinition.CheckpointConfig) + } else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath) { + delta.Add("Spec.TrainingJobDefinition.CheckpointConfig.LocalPath", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath) + } else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != nil { + if *a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != *b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath { + delta.Add("Spec.TrainingJobDefinition.CheckpointConfig.LocalPath", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI) { + delta.Add("Spec.TrainingJobDefinition.CheckpointConfig.S3URI", a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI) + } else if a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != nil && b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != nil { + if *a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != *b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI { + delta.Add("Spec.TrainingJobDefinition.CheckpointConfig.S3URI", 
a.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI, b.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName) { + delta.Add("Spec.TrainingJobDefinition.DefinitionName", a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName) + } else if a.ko.Spec.TrainingJobDefinition.DefinitionName != nil && b.ko.Spec.TrainingJobDefinition.DefinitionName != nil { + if *a.ko.Spec.TrainingJobDefinition.DefinitionName != *b.ko.Spec.TrainingJobDefinition.DefinitionName { + delta.Add("Spec.TrainingJobDefinition.DefinitionName", a.ko.Spec.TrainingJobDefinition.DefinitionName, b.ko.Spec.TrainingJobDefinition.DefinitionName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption) { + delta.Add("Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption", a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption) + } else if a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != nil && b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != nil { + if *a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != *b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption { + delta.Add("Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption", a.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption, b.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining) { + delta.Add("Spec.TrainingJobDefinition.EnableManagedSpotTraining", a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining) + } else if a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != nil && b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != nil { + if *a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != *b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining { + delta.Add("Spec.TrainingJobDefinition.EnableManagedSpotTraining", a.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining, b.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation) { + delta.Add("Spec.TrainingJobDefinition.EnableNetworkIsolation", a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation) + } else if a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != nil && b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != nil { + if *a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != *b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation { + delta.Add("Spec.TrainingJobDefinition.EnableNetworkIsolation", a.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation, b.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.HyperParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges) { + delta.Add("Spec.TrainingJobDefinition.HyperParameterRanges", 
a.ko.Spec.TrainingJobDefinition.HyperParameterRanges, b.ko.Spec.TrainingJobDefinition.HyperParameterRanges) + } else if a.ko.Spec.TrainingJobDefinition.HyperParameterRanges != nil && b.ko.Spec.TrainingJobDefinition.HyperParameterRanges != nil { + + } + + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig, b.ko.Spec.TrainingJobDefinition.OutputDataConfig) { + delta.Add("Spec.TrainingJobDefinition.OutputDataConfig", a.ko.Spec.TrainingJobDefinition.OutputDataConfig, b.ko.Spec.TrainingJobDefinition.OutputDataConfig) + } else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID) { + delta.Add("Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID) + } else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != nil { + if *a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != *b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID { + delta.Add("Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath) { + delta.Add("Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath) + } else if a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != nil && b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != nil { + if *a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != *b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath { + delta.Add("Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath", a.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath, b.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig, b.ko.Spec.TrainingJobDefinition.ResourceConfig) { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig", a.ko.Spec.TrainingJobDefinition.ResourceConfig, b.ko.Spec.TrainingJobDefinition.ResourceConfig) + } else if a.ko.Spec.TrainingJobDefinition.ResourceConfig != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount) { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.InstanceCount", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount) + } else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != nil { + if *a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.InstanceCount", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount, 
b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType) { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.InstanceType", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType) + } else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != nil { + if *a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.InstanceType", a.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType, b.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID) { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID) + } else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != nil { + if *a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB) { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB) + } else if a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != nil && b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != nil { + if *a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != *b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB { + delta.Add("Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB", a.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB, b.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN) { + delta.Add("Spec.TrainingJobDefinition.RoleARN", a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN) + } else if a.ko.Spec.TrainingJobDefinition.RoleARN != nil && b.ko.Spec.TrainingJobDefinition.RoleARN != nil { + if *a.ko.Spec.TrainingJobDefinition.RoleARN != *b.ko.Spec.TrainingJobDefinition.RoleARN { + delta.Add("Spec.TrainingJobDefinition.RoleARN", a.ko.Spec.TrainingJobDefinition.RoleARN, b.ko.Spec.TrainingJobDefinition.RoleARN) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) { + delta.Add("Spec.TrainingJobDefinition.StaticHyperParameters", a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) + } else if 
a.ko.Spec.TrainingJobDefinition.StaticHyperParameters != nil && b.ko.Spec.TrainingJobDefinition.StaticHyperParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) { + delta.Add("Spec.TrainingJobDefinition.StaticHyperParameters", a.ko.Spec.TrainingJobDefinition.StaticHyperParameters, b.ko.Spec.TrainingJobDefinition.StaticHyperParameters) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition, b.ko.Spec.TrainingJobDefinition.StoppingCondition) { + delta.Add("Spec.TrainingJobDefinition.StoppingCondition", a.ko.Spec.TrainingJobDefinition.StoppingCondition, b.ko.Spec.TrainingJobDefinition.StoppingCondition) + } else if a.ko.Spec.TrainingJobDefinition.StoppingCondition != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds) { + delta.Add("Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds) + } else if a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil { + if *a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != *b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds { + delta.Add("Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds) { + delta.Add("Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds) + } else if a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != nil && b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != nil { + if *a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != *b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds { + delta.Add("Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds", a.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective, b.ko.Spec.TrainingJobDefinition.TuningObjective) { + delta.Add("Spec.TrainingJobDefinition.TuningObjective", a.ko.Spec.TrainingJobDefinition.TuningObjective, b.ko.Spec.TrainingJobDefinition.TuningObjective) + } else if a.ko.Spec.TrainingJobDefinition.TuningObjective != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName) { + delta.Add("Spec.TrainingJobDefinition.TuningObjective.MetricName", a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, 
b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName) + } else if a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != nil { + if *a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != *b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName { + delta.Add("Spec.TrainingJobDefinition.TuningObjective.MetricName", a.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName, b.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type) { + delta.Add("Spec.TrainingJobDefinition.TuningObjective.Type", a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type) + } else if a.ko.Spec.TrainingJobDefinition.TuningObjective.Type != nil && b.ko.Spec.TrainingJobDefinition.TuningObjective.Type != nil { + if *a.ko.Spec.TrainingJobDefinition.TuningObjective.Type != *b.ko.Spec.TrainingJobDefinition.TuningObjective.Type { + delta.Add("Spec.TrainingJobDefinition.TuningObjective.Type", a.ko.Spec.TrainingJobDefinition.TuningObjective.Type, b.ko.Spec.TrainingJobDefinition.TuningObjective.Type) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobDefinition.VPCConfig, b.ko.Spec.TrainingJobDefinition.VPCConfig) { + delta.Add("Spec.TrainingJobDefinition.VPCConfig", a.ko.Spec.TrainingJobDefinition.VPCConfig, b.ko.Spec.TrainingJobDefinition.VPCConfig) + } else if a.ko.Spec.TrainingJobDefinition.VPCConfig != nil && b.ko.Spec.TrainingJobDefinition.VPCConfig != nil { + + if !ackcompare.SliceStringPEqual(a.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs, b.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs) { + delta.Add("Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs", a.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs, b.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs) + } + + if !ackcompare.SliceStringPEqual(a.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets, b.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets) { + delta.Add("Spec.TrainingJobDefinition.VPCConfig.Subnets", a.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets, b.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets) + } + } + } + + if ackcompare.HasNilDifference(a.ko.Spec.WarmStartConfig, b.ko.Spec.WarmStartConfig) { + delta.Add("Spec.WarmStartConfig", a.ko.Spec.WarmStartConfig, b.ko.Spec.WarmStartConfig) + } else if a.ko.Spec.WarmStartConfig != nil && b.ko.Spec.WarmStartConfig != nil { + + if ackcompare.HasNilDifference(a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType) { + delta.Add("Spec.WarmStartConfig.WarmStartType", a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType) + } else if a.ko.Spec.WarmStartConfig.WarmStartType != nil && b.ko.Spec.WarmStartConfig.WarmStartType != nil { + if *a.ko.Spec.WarmStartConfig.WarmStartType != *b.ko.Spec.WarmStartConfig.WarmStartType { + delta.Add("Spec.WarmStartConfig.WarmStartType", a.ko.Spec.WarmStartConfig.WarmStartType, b.ko.Spec.WarmStartConfig.WarmStartType) + } + } + } + + return delta +} diff --git a/pkg/resource/hyper_parameter_tuning_job/descriptor.go b/pkg/resource/hyper_parameter_tuning_job/descriptor.go index 4cf6950b..d5be55ba 100644 --- a/pkg/resource/hyper_parameter_tuning_job/descriptor.go +++ b/pkg/resource/hyper_parameter_tuning_job/descriptor.go @@ -18,8 +18,6 @@ package hyper_parameter_tuning_job import ( 
ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sapirt "k8s.io/apimachinery/pkg/runtime" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -65,37 +63,10 @@ func (d *resourceDescriptor) ResourceFromRuntimeObject( } } -// Equal returns true if the two supplied AWSResources have the same content. -// The underlying types of the two supplied AWSResources should be the same. In -// other words, the Equal() method should be called with the same concrete -// implementing AWSResource type -func (d *resourceDescriptor) Equal( - a acktypes.AWSResource, - b acktypes.AWSResource, -) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - return cmp.Equal(ac.ko, bc.ko, opts...) -} - -// Diff returns a Reporter which provides the difference between two supplied -// AWSResources. The underlying types of the two supplied AWSResources should -// be the same. In other words, the Diff() method should be called with the -// same concrete implementing AWSResource type -func (d *resourceDescriptor) Diff( - a acktypes.AWSResource, - b acktypes.AWSResource, -) *ackcompare.Reporter { - ac := a.(*resource) - bc := b.(*resource) - var diffReporter ackcompare.Reporter - opts := []cmp.Option{ - cmp.Reporter(&diffReporter), - cmp.AllowUnexported(svcapitypes.HyperParameterTuningJob{}), - } - cmp.Equal(ac.ko, bc.ko, opts...) - return &diffReporter +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) } // UpdateCRStatus accepts an AWSResource object and changes the Status diff --git a/pkg/resource/hyper_parameter_tuning_job/manager.go b/pkg/resource/hyper_parameter_tuning_job/manager.go index e595d5ca..c8337d44 100644 --- a/pkg/resource/hyper_parameter_tuning_job/manager.go +++ b/pkg/resource/hyper_parameter_tuning_job/manager.go @@ -125,7 +125,7 @@ func (rm *resourceManager) Update( ctx context.Context, resDesired acktypes.AWSResource, resLatest acktypes.AWSResource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (acktypes.AWSResource, error) { desired := rm.concreteResource(resDesired) latest := rm.concreteResource(resLatest) @@ -133,7 +133,7 @@ func (rm *resourceManager) Update( // Should never happen... if it does, it's buggy code. 
panic("resource manager's Update() method received resource with nil CR object") } - updated, err := rm.sdkUpdate(ctx, desired, latest, diffReporter) + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) if err != nil { return rm.onError(latest, err) } diff --git a/pkg/resource/hyper_parameter_tuning_job/resource.go b/pkg/resource/hyper_parameter_tuning_job/resource.go index 7fdf0fbd..634110e8 100644 --- a/pkg/resource/hyper_parameter_tuning_job/resource.go +++ b/pkg/resource/hyper_parameter_tuning_job/resource.go @@ -24,7 +24,7 @@ import ( svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1" ) -// resource implements the `aws-service-operator-k8s/pkg/types.AWSResource` +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` // interface type resource struct { // The Kubernetes-native CR representing the resource diff --git a/pkg/resource/hyper_parameter_tuning_job/sdk.go b/pkg/resource/hyper_parameter_tuning_job/sdk.go index 5c258727..36c7955c 100644 --- a/pkg/resource/hyper_parameter_tuning_job/sdk.go +++ b/pkg/resource/hyper_parameter_tuning_job/sdk.go @@ -126,9 +126,13 @@ func (rm *resourceManager) sdkFind( f0.TuningJobName = resp.BestTrainingJob.TuningJobName } ko.Status.BestTrainingJob = f0 + } else { + ko.Status.BestTrainingJob = nil } if resp.FailureReason != nil { ko.Status.FailureReason = resp.FailureReason + } else { + ko.Status.FailureReason = nil } if ko.Status.ACKResourceMetadata == nil { ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} @@ -237,12 +241,18 @@ func (rm *resourceManager) sdkFind( f5.TuningJobCompletionCriteria = f5f5 } ko.Spec.HyperParameterTuningJobConfig = f5 + } else { + ko.Spec.HyperParameterTuningJobConfig = nil } if resp.HyperParameterTuningJobName != nil { ko.Spec.HyperParameterTuningJobName = resp.HyperParameterTuningJobName + } else { + ko.Spec.HyperParameterTuningJobName = nil } if resp.HyperParameterTuningJobStatus != nil { ko.Status.HyperParameterTuningJobStatus = resp.HyperParameterTuningJobStatus + } else { + ko.Status.HyperParameterTuningJobStatus = nil } if resp.OverallBestTrainingJob != nil { f10 := &svcapitypes.HyperParameterTrainingJobSummary{} @@ -299,6 +309,8 @@ func (rm *resourceManager) sdkFind( f10.TuningJobName = resp.OverallBestTrainingJob.TuningJobName } ko.Status.OverallBestTrainingJob = f10 + } else { + ko.Status.OverallBestTrainingJob = nil } if resp.TrainingJobDefinition != nil { f11 := &svcapitypes.HyperParameterTrainingJobDefinition{} @@ -568,6 +580,8 @@ func (rm *resourceManager) sdkFind( f11.VPCConfig = f11f14 } ko.Spec.TrainingJobDefinition = f11 + } else { + ko.Spec.TrainingJobDefinition = nil } if resp.TrainingJobDefinitions != nil { f12 := []*svcapitypes.HyperParameterTrainingJobDefinition{} @@ -841,6 +855,8 @@ func (rm *resourceManager) sdkFind( f12 = append(f12, f12elem) } ko.Spec.TrainingJobDefinitions = f12 + } else { + ko.Spec.TrainingJobDefinitions = nil } if resp.WarmStartConfig != nil { f14 := &svcapitypes.HyperParameterTuningJobWarmStartConfig{} @@ -859,6 +875,8 @@ func (rm *resourceManager) sdkFind( f14.WarmStartType = resp.WarmStartConfig.WarmStartType } ko.Spec.WarmStartConfig = f14 + } else { + ko.Spec.WarmStartConfig = nil } rm.setStatusDefaults(ko) @@ -1035,579 +1053,565 @@ func (rm *resourceManager) newCreateRequestPayload( if r.ko.Spec.HyperParameterTuningJobName != nil { res.SetHyperParameterTuningJobName(*r.ko.Spec.HyperParameterTuningJobName) } - if r.ko.Spec.Tags != nil { - f2 := []*svcsdk.Tag{} - for _, f2iter := range 
r.ko.Spec.Tags { - f2elem := &svcsdk.Tag{} - if f2iter.Key != nil { - f2elem.SetKey(*f2iter.Key) - } - if f2iter.Value != nil { - f2elem.SetValue(*f2iter.Value) - } - f2 = append(f2, f2elem) - } - res.SetTags(f2) - } if r.ko.Spec.TrainingJobDefinition != nil { - f3 := &svcsdk.HyperParameterTrainingJobDefinition{} + f2 := &svcsdk.HyperParameterTrainingJobDefinition{} if r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification != nil { - f3f0 := &svcsdk.HyperParameterAlgorithmSpecification{} + f2f0 := &svcsdk.HyperParameterAlgorithmSpecification{} if r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName != nil { - f3f0.SetAlgorithmName(*r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName) + f2f0.SetAlgorithmName(*r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.AlgorithmName) } if r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions != nil { - f3f0f1 := []*svcsdk.MetricDefinition{} - for _, f3f0f1iter := range r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions { - f3f0f1elem := &svcsdk.MetricDefinition{} - if f3f0f1iter.Name != nil { - f3f0f1elem.SetName(*f3f0f1iter.Name) + f2f0f1 := []*svcsdk.MetricDefinition{} + for _, f2f0f1iter := range r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.MetricDefinitions { + f2f0f1elem := &svcsdk.MetricDefinition{} + if f2f0f1iter.Name != nil { + f2f0f1elem.SetName(*f2f0f1iter.Name) } - if f3f0f1iter.Regex != nil { - f3f0f1elem.SetRegex(*f3f0f1iter.Regex) + if f2f0f1iter.Regex != nil { + f2f0f1elem.SetRegex(*f2f0f1iter.Regex) } - f3f0f1 = append(f3f0f1, f3f0f1elem) + f2f0f1 = append(f2f0f1, f2f0f1elem) } - f3f0.SetMetricDefinitions(f3f0f1) + f2f0.SetMetricDefinitions(f2f0f1) } if r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage != nil { - f3f0.SetTrainingImage(*r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage) + f2f0.SetTrainingImage(*r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingImage) } if r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode != nil { - f3f0.SetTrainingInputMode(*r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode) + f2f0.SetTrainingInputMode(*r.ko.Spec.TrainingJobDefinition.AlgorithmSpecification.TrainingInputMode) } - f3.SetAlgorithmSpecification(f3f0) + f2.SetAlgorithmSpecification(f2f0) } if r.ko.Spec.TrainingJobDefinition.CheckpointConfig != nil { - f3f1 := &svcsdk.CheckpointConfig{} + f2f1 := &svcsdk.CheckpointConfig{} if r.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath != nil { - f3f1.SetLocalPath(*r.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath) + f2f1.SetLocalPath(*r.ko.Spec.TrainingJobDefinition.CheckpointConfig.LocalPath) } if r.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI != nil { - f3f1.SetS3Uri(*r.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI) + f2f1.SetS3Uri(*r.ko.Spec.TrainingJobDefinition.CheckpointConfig.S3URI) } - f3.SetCheckpointConfig(f3f1) + f2.SetCheckpointConfig(f2f1) } if r.ko.Spec.TrainingJobDefinition.DefinitionName != nil { - f3.SetDefinitionName(*r.ko.Spec.TrainingJobDefinition.DefinitionName) + f2.SetDefinitionName(*r.ko.Spec.TrainingJobDefinition.DefinitionName) } if r.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption != nil { - f3.SetEnableInterContainerTrafficEncryption(*r.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption) + f2.SetEnableInterContainerTrafficEncryption(*r.ko.Spec.TrainingJobDefinition.EnableInterContainerTrafficEncryption) } if 
r.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining != nil { - f3.SetEnableManagedSpotTraining(*r.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining) + f2.SetEnableManagedSpotTraining(*r.ko.Spec.TrainingJobDefinition.EnableManagedSpotTraining) } if r.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation != nil { - f3.SetEnableNetworkIsolation(*r.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation) + f2.SetEnableNetworkIsolation(*r.ko.Spec.TrainingJobDefinition.EnableNetworkIsolation) } if r.ko.Spec.TrainingJobDefinition.HyperParameterRanges != nil { - f3f6 := &svcsdk.ParameterRanges{} + f2f6 := &svcsdk.ParameterRanges{} if r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges != nil { - f3f6f0 := []*svcsdk.CategoricalParameterRange{} - for _, f3f6f0iter := range r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges { - f3f6f0elem := &svcsdk.CategoricalParameterRange{} - if f3f6f0iter.Name != nil { - f3f6f0elem.SetName(*f3f6f0iter.Name) - } - if f3f6f0iter.Values != nil { - f3f6f0elemf1 := []*string{} - for _, f3f6f0elemf1iter := range f3f6f0iter.Values { - var f3f6f0elemf1elem string - f3f6f0elemf1elem = *f3f6f0elemf1iter - f3f6f0elemf1 = append(f3f6f0elemf1, &f3f6f0elemf1elem) + f2f6f0 := []*svcsdk.CategoricalParameterRange{} + for _, f2f6f0iter := range r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.CategoricalParameterRanges { + f2f6f0elem := &svcsdk.CategoricalParameterRange{} + if f2f6f0iter.Name != nil { + f2f6f0elem.SetName(*f2f6f0iter.Name) + } + if f2f6f0iter.Values != nil { + f2f6f0elemf1 := []*string{} + for _, f2f6f0elemf1iter := range f2f6f0iter.Values { + var f2f6f0elemf1elem string + f2f6f0elemf1elem = *f2f6f0elemf1iter + f2f6f0elemf1 = append(f2f6f0elemf1, &f2f6f0elemf1elem) } - f3f6f0elem.SetValues(f3f6f0elemf1) + f2f6f0elem.SetValues(f2f6f0elemf1) } - f3f6f0 = append(f3f6f0, f3f6f0elem) + f2f6f0 = append(f2f6f0, f2f6f0elem) } - f3f6.SetCategoricalParameterRanges(f3f6f0) + f2f6.SetCategoricalParameterRanges(f2f6f0) } if r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges != nil { - f3f6f1 := []*svcsdk.ContinuousParameterRange{} - for _, f3f6f1iter := range r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges { - f3f6f1elem := &svcsdk.ContinuousParameterRange{} - if f3f6f1iter.MaxValue != nil { - f3f6f1elem.SetMaxValue(*f3f6f1iter.MaxValue) + f2f6f1 := []*svcsdk.ContinuousParameterRange{} + for _, f2f6f1iter := range r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.ContinuousParameterRanges { + f2f6f1elem := &svcsdk.ContinuousParameterRange{} + if f2f6f1iter.MaxValue != nil { + f2f6f1elem.SetMaxValue(*f2f6f1iter.MaxValue) } - if f3f6f1iter.MinValue != nil { - f3f6f1elem.SetMinValue(*f3f6f1iter.MinValue) + if f2f6f1iter.MinValue != nil { + f2f6f1elem.SetMinValue(*f2f6f1iter.MinValue) } - if f3f6f1iter.Name != nil { - f3f6f1elem.SetName(*f3f6f1iter.Name) + if f2f6f1iter.Name != nil { + f2f6f1elem.SetName(*f2f6f1iter.Name) } - if f3f6f1iter.ScalingType != nil { - f3f6f1elem.SetScalingType(*f3f6f1iter.ScalingType) + if f2f6f1iter.ScalingType != nil { + f2f6f1elem.SetScalingType(*f2f6f1iter.ScalingType) } - f3f6f1 = append(f3f6f1, f3f6f1elem) + f2f6f1 = append(f2f6f1, f2f6f1elem) } - f3f6.SetContinuousParameterRanges(f3f6f1) + f2f6.SetContinuousParameterRanges(f2f6f1) } if r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges != nil { - f3f6f2 := []*svcsdk.IntegerParameterRange{} - for _, f3f6f2iter := range 
r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges { - f3f6f2elem := &svcsdk.IntegerParameterRange{} - if f3f6f2iter.MaxValue != nil { - f3f6f2elem.SetMaxValue(*f3f6f2iter.MaxValue) + f2f6f2 := []*svcsdk.IntegerParameterRange{} + for _, f2f6f2iter := range r.ko.Spec.TrainingJobDefinition.HyperParameterRanges.IntegerParameterRanges { + f2f6f2elem := &svcsdk.IntegerParameterRange{} + if f2f6f2iter.MaxValue != nil { + f2f6f2elem.SetMaxValue(*f2f6f2iter.MaxValue) } - if f3f6f2iter.MinValue != nil { - f3f6f2elem.SetMinValue(*f3f6f2iter.MinValue) + if f2f6f2iter.MinValue != nil { + f2f6f2elem.SetMinValue(*f2f6f2iter.MinValue) } - if f3f6f2iter.Name != nil { - f3f6f2elem.SetName(*f3f6f2iter.Name) + if f2f6f2iter.Name != nil { + f2f6f2elem.SetName(*f2f6f2iter.Name) } - if f3f6f2iter.ScalingType != nil { - f3f6f2elem.SetScalingType(*f3f6f2iter.ScalingType) + if f2f6f2iter.ScalingType != nil { + f2f6f2elem.SetScalingType(*f2f6f2iter.ScalingType) } - f3f6f2 = append(f3f6f2, f3f6f2elem) + f2f6f2 = append(f2f6f2, f2f6f2elem) } - f3f6.SetIntegerParameterRanges(f3f6f2) + f2f6.SetIntegerParameterRanges(f2f6f2) } - f3.SetHyperParameterRanges(f3f6) + f2.SetHyperParameterRanges(f2f6) } if r.ko.Spec.TrainingJobDefinition.InputDataConfig != nil { - f3f7 := []*svcsdk.Channel{} - for _, f3f7iter := range r.ko.Spec.TrainingJobDefinition.InputDataConfig { - f3f7elem := &svcsdk.Channel{} - if f3f7iter.ChannelName != nil { - f3f7elem.SetChannelName(*f3f7iter.ChannelName) - } - if f3f7iter.CompressionType != nil { - f3f7elem.SetCompressionType(*f3f7iter.CompressionType) - } - if f3f7iter.ContentType != nil { - f3f7elem.SetContentType(*f3f7iter.ContentType) - } - if f3f7iter.DataSource != nil { - f3f7elemf3 := &svcsdk.DataSource{} - if f3f7iter.DataSource.FileSystemDataSource != nil { - f3f7elemf3f0 := &svcsdk.FileSystemDataSource{} - if f3f7iter.DataSource.FileSystemDataSource.DirectoryPath != nil { - f3f7elemf3f0.SetDirectoryPath(*f3f7iter.DataSource.FileSystemDataSource.DirectoryPath) + f2f7 := []*svcsdk.Channel{} + for _, f2f7iter := range r.ko.Spec.TrainingJobDefinition.InputDataConfig { + f2f7elem := &svcsdk.Channel{} + if f2f7iter.ChannelName != nil { + f2f7elem.SetChannelName(*f2f7iter.ChannelName) + } + if f2f7iter.CompressionType != nil { + f2f7elem.SetCompressionType(*f2f7iter.CompressionType) + } + if f2f7iter.ContentType != nil { + f2f7elem.SetContentType(*f2f7iter.ContentType) + } + if f2f7iter.DataSource != nil { + f2f7elemf3 := &svcsdk.DataSource{} + if f2f7iter.DataSource.FileSystemDataSource != nil { + f2f7elemf3f0 := &svcsdk.FileSystemDataSource{} + if f2f7iter.DataSource.FileSystemDataSource.DirectoryPath != nil { + f2f7elemf3f0.SetDirectoryPath(*f2f7iter.DataSource.FileSystemDataSource.DirectoryPath) } - if f3f7iter.DataSource.FileSystemDataSource.FileSystemAccessMode != nil { - f3f7elemf3f0.SetFileSystemAccessMode(*f3f7iter.DataSource.FileSystemDataSource.FileSystemAccessMode) + if f2f7iter.DataSource.FileSystemDataSource.FileSystemAccessMode != nil { + f2f7elemf3f0.SetFileSystemAccessMode(*f2f7iter.DataSource.FileSystemDataSource.FileSystemAccessMode) } - if f3f7iter.DataSource.FileSystemDataSource.FileSystemID != nil { - f3f7elemf3f0.SetFileSystemId(*f3f7iter.DataSource.FileSystemDataSource.FileSystemID) + if f2f7iter.DataSource.FileSystemDataSource.FileSystemID != nil { + f2f7elemf3f0.SetFileSystemId(*f2f7iter.DataSource.FileSystemDataSource.FileSystemID) } - if f3f7iter.DataSource.FileSystemDataSource.FileSystemType != nil { - 
f3f7elemf3f0.SetFileSystemType(*f3f7iter.DataSource.FileSystemDataSource.FileSystemType) + if f2f7iter.DataSource.FileSystemDataSource.FileSystemType != nil { + f2f7elemf3f0.SetFileSystemType(*f2f7iter.DataSource.FileSystemDataSource.FileSystemType) } - f3f7elemf3.SetFileSystemDataSource(f3f7elemf3f0) - } - if f3f7iter.DataSource.S3DataSource != nil { - f3f7elemf3f1 := &svcsdk.S3DataSource{} - if f3f7iter.DataSource.S3DataSource.AttributeNames != nil { - f3f7elemf3f1f0 := []*string{} - for _, f3f7elemf3f1f0iter := range f3f7iter.DataSource.S3DataSource.AttributeNames { - var f3f7elemf3f1f0elem string - f3f7elemf3f1f0elem = *f3f7elemf3f1f0iter - f3f7elemf3f1f0 = append(f3f7elemf3f1f0, &f3f7elemf3f1f0elem) + f2f7elemf3.SetFileSystemDataSource(f2f7elemf3f0) + } + if f2f7iter.DataSource.S3DataSource != nil { + f2f7elemf3f1 := &svcsdk.S3DataSource{} + if f2f7iter.DataSource.S3DataSource.AttributeNames != nil { + f2f7elemf3f1f0 := []*string{} + for _, f2f7elemf3f1f0iter := range f2f7iter.DataSource.S3DataSource.AttributeNames { + var f2f7elemf3f1f0elem string + f2f7elemf3f1f0elem = *f2f7elemf3f1f0iter + f2f7elemf3f1f0 = append(f2f7elemf3f1f0, &f2f7elemf3f1f0elem) } - f3f7elemf3f1.SetAttributeNames(f3f7elemf3f1f0) + f2f7elemf3f1.SetAttributeNames(f2f7elemf3f1f0) } - if f3f7iter.DataSource.S3DataSource.S3DataDistributionType != nil { - f3f7elemf3f1.SetS3DataDistributionType(*f3f7iter.DataSource.S3DataSource.S3DataDistributionType) + if f2f7iter.DataSource.S3DataSource.S3DataDistributionType != nil { + f2f7elemf3f1.SetS3DataDistributionType(*f2f7iter.DataSource.S3DataSource.S3DataDistributionType) } - if f3f7iter.DataSource.S3DataSource.S3DataType != nil { - f3f7elemf3f1.SetS3DataType(*f3f7iter.DataSource.S3DataSource.S3DataType) + if f2f7iter.DataSource.S3DataSource.S3DataType != nil { + f2f7elemf3f1.SetS3DataType(*f2f7iter.DataSource.S3DataSource.S3DataType) } - if f3f7iter.DataSource.S3DataSource.S3URI != nil { - f3f7elemf3f1.SetS3Uri(*f3f7iter.DataSource.S3DataSource.S3URI) + if f2f7iter.DataSource.S3DataSource.S3URI != nil { + f2f7elemf3f1.SetS3Uri(*f2f7iter.DataSource.S3DataSource.S3URI) } - f3f7elemf3.SetS3DataSource(f3f7elemf3f1) + f2f7elemf3.SetS3DataSource(f2f7elemf3f1) } - f3f7elem.SetDataSource(f3f7elemf3) + f2f7elem.SetDataSource(f2f7elemf3) } - if f3f7iter.InputMode != nil { - f3f7elem.SetInputMode(*f3f7iter.InputMode) + if f2f7iter.InputMode != nil { + f2f7elem.SetInputMode(*f2f7iter.InputMode) } - if f3f7iter.RecordWrapperType != nil { - f3f7elem.SetRecordWrapperType(*f3f7iter.RecordWrapperType) + if f2f7iter.RecordWrapperType != nil { + f2f7elem.SetRecordWrapperType(*f2f7iter.RecordWrapperType) } - if f3f7iter.ShuffleConfig != nil { - f3f7elemf6 := &svcsdk.ShuffleConfig{} - if f3f7iter.ShuffleConfig.Seed != nil { - f3f7elemf6.SetSeed(*f3f7iter.ShuffleConfig.Seed) + if f2f7iter.ShuffleConfig != nil { + f2f7elemf6 := &svcsdk.ShuffleConfig{} + if f2f7iter.ShuffleConfig.Seed != nil { + f2f7elemf6.SetSeed(*f2f7iter.ShuffleConfig.Seed) } - f3f7elem.SetShuffleConfig(f3f7elemf6) + f2f7elem.SetShuffleConfig(f2f7elemf6) } - f3f7 = append(f3f7, f3f7elem) + f2f7 = append(f2f7, f2f7elem) } - f3.SetInputDataConfig(f3f7) + f2.SetInputDataConfig(f2f7) } if r.ko.Spec.TrainingJobDefinition.OutputDataConfig != nil { - f3f8 := &svcsdk.OutputDataConfig{} + f2f8 := &svcsdk.OutputDataConfig{} if r.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID != nil { - f3f8.SetKmsKeyId(*r.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID) + 
f2f8.SetKmsKeyId(*r.ko.Spec.TrainingJobDefinition.OutputDataConfig.KMSKeyID) } if r.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath != nil { - f3f8.SetS3OutputPath(*r.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath) + f2f8.SetS3OutputPath(*r.ko.Spec.TrainingJobDefinition.OutputDataConfig.S3OutputPath) } - f3.SetOutputDataConfig(f3f8) + f2.SetOutputDataConfig(f2f8) } if r.ko.Spec.TrainingJobDefinition.ResourceConfig != nil { - f3f9 := &svcsdk.ResourceConfig{} + f2f9 := &svcsdk.ResourceConfig{} if r.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount != nil { - f3f9.SetInstanceCount(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount) + f2f9.SetInstanceCount(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceCount) } if r.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType != nil { - f3f9.SetInstanceType(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType) + f2f9.SetInstanceType(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.InstanceType) } if r.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID != nil { - f3f9.SetVolumeKmsKeyId(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID) + f2f9.SetVolumeKmsKeyId(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeKMSKeyID) } if r.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB != nil { - f3f9.SetVolumeSizeInGB(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB) + f2f9.SetVolumeSizeInGB(*r.ko.Spec.TrainingJobDefinition.ResourceConfig.VolumeSizeInGB) } - f3.SetResourceConfig(f3f9) + f2.SetResourceConfig(f2f9) } if r.ko.Spec.TrainingJobDefinition.RoleARN != nil { - f3.SetRoleArn(*r.ko.Spec.TrainingJobDefinition.RoleARN) + f2.SetRoleArn(*r.ko.Spec.TrainingJobDefinition.RoleARN) } if r.ko.Spec.TrainingJobDefinition.StaticHyperParameters != nil { - f3f11 := map[string]*string{} - for f3f11key, f3f11valiter := range r.ko.Spec.TrainingJobDefinition.StaticHyperParameters { - var f3f11val string - f3f11val = *f3f11valiter - f3f11[f3f11key] = &f3f11val + f2f11 := map[string]*string{} + for f2f11key, f2f11valiter := range r.ko.Spec.TrainingJobDefinition.StaticHyperParameters { + var f2f11val string + f2f11val = *f2f11valiter + f2f11[f2f11key] = &f2f11val } - f3.SetStaticHyperParameters(f3f11) + f2.SetStaticHyperParameters(f2f11) } if r.ko.Spec.TrainingJobDefinition.StoppingCondition != nil { - f3f12 := &svcsdk.StoppingCondition{} + f2f12 := &svcsdk.StoppingCondition{} if r.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds != nil { - f3f12.SetMaxRuntimeInSeconds(*r.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds) + f2f12.SetMaxRuntimeInSeconds(*r.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxRuntimeInSeconds) } if r.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds != nil { - f3f12.SetMaxWaitTimeInSeconds(*r.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds) + f2f12.SetMaxWaitTimeInSeconds(*r.ko.Spec.TrainingJobDefinition.StoppingCondition.MaxWaitTimeInSeconds) } - f3.SetStoppingCondition(f3f12) + f2.SetStoppingCondition(f2f12) } if r.ko.Spec.TrainingJobDefinition.TuningObjective != nil { - f3f13 := &svcsdk.HyperParameterTuningJobObjective{} + f2f13 := &svcsdk.HyperParameterTuningJobObjective{} if r.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName != nil { - f3f13.SetMetricName(*r.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName) + f2f13.SetMetricName(*r.ko.Spec.TrainingJobDefinition.TuningObjective.MetricName) } if 
r.ko.Spec.TrainingJobDefinition.TuningObjective.Type != nil { - f3f13.SetType(*r.ko.Spec.TrainingJobDefinition.TuningObjective.Type) + f2f13.SetType(*r.ko.Spec.TrainingJobDefinition.TuningObjective.Type) } - f3.SetTuningObjective(f3f13) + f2.SetTuningObjective(f2f13) } if r.ko.Spec.TrainingJobDefinition.VPCConfig != nil { - f3f14 := &svcsdk.VpcConfig{} + f2f14 := &svcsdk.VpcConfig{} if r.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs != nil { - f3f14f0 := []*string{} - for _, f3f14f0iter := range r.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs { - var f3f14f0elem string - f3f14f0elem = *f3f14f0iter - f3f14f0 = append(f3f14f0, &f3f14f0elem) + f2f14f0 := []*string{} + for _, f2f14f0iter := range r.ko.Spec.TrainingJobDefinition.VPCConfig.SecurityGroupIDs { + var f2f14f0elem string + f2f14f0elem = *f2f14f0iter + f2f14f0 = append(f2f14f0, &f2f14f0elem) } - f3f14.SetSecurityGroupIds(f3f14f0) + f2f14.SetSecurityGroupIds(f2f14f0) } if r.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets != nil { - f3f14f1 := []*string{} - for _, f3f14f1iter := range r.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets { - var f3f14f1elem string - f3f14f1elem = *f3f14f1iter - f3f14f1 = append(f3f14f1, &f3f14f1elem) + f2f14f1 := []*string{} + for _, f2f14f1iter := range r.ko.Spec.TrainingJobDefinition.VPCConfig.Subnets { + var f2f14f1elem string + f2f14f1elem = *f2f14f1iter + f2f14f1 = append(f2f14f1, &f2f14f1elem) } - f3f14.SetSubnets(f3f14f1) + f2f14.SetSubnets(f2f14f1) } - f3.SetVpcConfig(f3f14) + f2.SetVpcConfig(f2f14) } - res.SetTrainingJobDefinition(f3) + res.SetTrainingJobDefinition(f2) } if r.ko.Spec.TrainingJobDefinitions != nil { - f4 := []*svcsdk.HyperParameterTrainingJobDefinition{} - for _, f4iter := range r.ko.Spec.TrainingJobDefinitions { - f4elem := &svcsdk.HyperParameterTrainingJobDefinition{} - if f4iter.AlgorithmSpecification != nil { - f4elemf0 := &svcsdk.HyperParameterAlgorithmSpecification{} - if f4iter.AlgorithmSpecification.AlgorithmName != nil { - f4elemf0.SetAlgorithmName(*f4iter.AlgorithmSpecification.AlgorithmName) - } - if f4iter.AlgorithmSpecification.MetricDefinitions != nil { - f4elemf0f1 := []*svcsdk.MetricDefinition{} - for _, f4elemf0f1iter := range f4iter.AlgorithmSpecification.MetricDefinitions { - f4elemf0f1elem := &svcsdk.MetricDefinition{} - if f4elemf0f1iter.Name != nil { - f4elemf0f1elem.SetName(*f4elemf0f1iter.Name) + f3 := []*svcsdk.HyperParameterTrainingJobDefinition{} + for _, f3iter := range r.ko.Spec.TrainingJobDefinitions { + f3elem := &svcsdk.HyperParameterTrainingJobDefinition{} + if f3iter.AlgorithmSpecification != nil { + f3elemf0 := &svcsdk.HyperParameterAlgorithmSpecification{} + if f3iter.AlgorithmSpecification.AlgorithmName != nil { + f3elemf0.SetAlgorithmName(*f3iter.AlgorithmSpecification.AlgorithmName) + } + if f3iter.AlgorithmSpecification.MetricDefinitions != nil { + f3elemf0f1 := []*svcsdk.MetricDefinition{} + for _, f3elemf0f1iter := range f3iter.AlgorithmSpecification.MetricDefinitions { + f3elemf0f1elem := &svcsdk.MetricDefinition{} + if f3elemf0f1iter.Name != nil { + f3elemf0f1elem.SetName(*f3elemf0f1iter.Name) } - if f4elemf0f1iter.Regex != nil { - f4elemf0f1elem.SetRegex(*f4elemf0f1iter.Regex) + if f3elemf0f1iter.Regex != nil { + f3elemf0f1elem.SetRegex(*f3elemf0f1iter.Regex) } - f4elemf0f1 = append(f4elemf0f1, f4elemf0f1elem) + f3elemf0f1 = append(f3elemf0f1, f3elemf0f1elem) } - f4elemf0.SetMetricDefinitions(f4elemf0f1) + f3elemf0.SetMetricDefinitions(f3elemf0f1) } - if f4iter.AlgorithmSpecification.TrainingImage != nil { - 
f4elemf0.SetTrainingImage(*f4iter.AlgorithmSpecification.TrainingImage) + if f3iter.AlgorithmSpecification.TrainingImage != nil { + f3elemf0.SetTrainingImage(*f3iter.AlgorithmSpecification.TrainingImage) } - if f4iter.AlgorithmSpecification.TrainingInputMode != nil { - f4elemf0.SetTrainingInputMode(*f4iter.AlgorithmSpecification.TrainingInputMode) + if f3iter.AlgorithmSpecification.TrainingInputMode != nil { + f3elemf0.SetTrainingInputMode(*f3iter.AlgorithmSpecification.TrainingInputMode) } - f4elem.SetAlgorithmSpecification(f4elemf0) + f3elem.SetAlgorithmSpecification(f3elemf0) } - if f4iter.CheckpointConfig != nil { - f4elemf1 := &svcsdk.CheckpointConfig{} - if f4iter.CheckpointConfig.LocalPath != nil { - f4elemf1.SetLocalPath(*f4iter.CheckpointConfig.LocalPath) + if f3iter.CheckpointConfig != nil { + f3elemf1 := &svcsdk.CheckpointConfig{} + if f3iter.CheckpointConfig.LocalPath != nil { + f3elemf1.SetLocalPath(*f3iter.CheckpointConfig.LocalPath) } - if f4iter.CheckpointConfig.S3URI != nil { - f4elemf1.SetS3Uri(*f4iter.CheckpointConfig.S3URI) + if f3iter.CheckpointConfig.S3URI != nil { + f3elemf1.SetS3Uri(*f3iter.CheckpointConfig.S3URI) } - f4elem.SetCheckpointConfig(f4elemf1) + f3elem.SetCheckpointConfig(f3elemf1) } - if f4iter.DefinitionName != nil { - f4elem.SetDefinitionName(*f4iter.DefinitionName) + if f3iter.DefinitionName != nil { + f3elem.SetDefinitionName(*f3iter.DefinitionName) } - if f4iter.EnableInterContainerTrafficEncryption != nil { - f4elem.SetEnableInterContainerTrafficEncryption(*f4iter.EnableInterContainerTrafficEncryption) + if f3iter.EnableInterContainerTrafficEncryption != nil { + f3elem.SetEnableInterContainerTrafficEncryption(*f3iter.EnableInterContainerTrafficEncryption) } - if f4iter.EnableManagedSpotTraining != nil { - f4elem.SetEnableManagedSpotTraining(*f4iter.EnableManagedSpotTraining) + if f3iter.EnableManagedSpotTraining != nil { + f3elem.SetEnableManagedSpotTraining(*f3iter.EnableManagedSpotTraining) } - if f4iter.EnableNetworkIsolation != nil { - f4elem.SetEnableNetworkIsolation(*f4iter.EnableNetworkIsolation) + if f3iter.EnableNetworkIsolation != nil { + f3elem.SetEnableNetworkIsolation(*f3iter.EnableNetworkIsolation) } - if f4iter.HyperParameterRanges != nil { - f4elemf6 := &svcsdk.ParameterRanges{} - if f4iter.HyperParameterRanges.CategoricalParameterRanges != nil { - f4elemf6f0 := []*svcsdk.CategoricalParameterRange{} - for _, f4elemf6f0iter := range f4iter.HyperParameterRanges.CategoricalParameterRanges { - f4elemf6f0elem := &svcsdk.CategoricalParameterRange{} - if f4elemf6f0iter.Name != nil { - f4elemf6f0elem.SetName(*f4elemf6f0iter.Name) + if f3iter.HyperParameterRanges != nil { + f3elemf6 := &svcsdk.ParameterRanges{} + if f3iter.HyperParameterRanges.CategoricalParameterRanges != nil { + f3elemf6f0 := []*svcsdk.CategoricalParameterRange{} + for _, f3elemf6f0iter := range f3iter.HyperParameterRanges.CategoricalParameterRanges { + f3elemf6f0elem := &svcsdk.CategoricalParameterRange{} + if f3elemf6f0iter.Name != nil { + f3elemf6f0elem.SetName(*f3elemf6f0iter.Name) } - if f4elemf6f0iter.Values != nil { - f4elemf6f0elemf1 := []*string{} - for _, f4elemf6f0elemf1iter := range f4elemf6f0iter.Values { - var f4elemf6f0elemf1elem string - f4elemf6f0elemf1elem = *f4elemf6f0elemf1iter - f4elemf6f0elemf1 = append(f4elemf6f0elemf1, &f4elemf6f0elemf1elem) + if f3elemf6f0iter.Values != nil { + f3elemf6f0elemf1 := []*string{} + for _, f3elemf6f0elemf1iter := range f3elemf6f0iter.Values { + var f3elemf6f0elemf1elem string + f3elemf6f0elemf1elem = 
*f3elemf6f0elemf1iter + f3elemf6f0elemf1 = append(f3elemf6f0elemf1, &f3elemf6f0elemf1elem) } - f4elemf6f0elem.SetValues(f4elemf6f0elemf1) + f3elemf6f0elem.SetValues(f3elemf6f0elemf1) } - f4elemf6f0 = append(f4elemf6f0, f4elemf6f0elem) + f3elemf6f0 = append(f3elemf6f0, f3elemf6f0elem) } - f4elemf6.SetCategoricalParameterRanges(f4elemf6f0) + f3elemf6.SetCategoricalParameterRanges(f3elemf6f0) } - if f4iter.HyperParameterRanges.ContinuousParameterRanges != nil { - f4elemf6f1 := []*svcsdk.ContinuousParameterRange{} - for _, f4elemf6f1iter := range f4iter.HyperParameterRanges.ContinuousParameterRanges { - f4elemf6f1elem := &svcsdk.ContinuousParameterRange{} - if f4elemf6f1iter.MaxValue != nil { - f4elemf6f1elem.SetMaxValue(*f4elemf6f1iter.MaxValue) + if f3iter.HyperParameterRanges.ContinuousParameterRanges != nil { + f3elemf6f1 := []*svcsdk.ContinuousParameterRange{} + for _, f3elemf6f1iter := range f3iter.HyperParameterRanges.ContinuousParameterRanges { + f3elemf6f1elem := &svcsdk.ContinuousParameterRange{} + if f3elemf6f1iter.MaxValue != nil { + f3elemf6f1elem.SetMaxValue(*f3elemf6f1iter.MaxValue) } - if f4elemf6f1iter.MinValue != nil { - f4elemf6f1elem.SetMinValue(*f4elemf6f1iter.MinValue) + if f3elemf6f1iter.MinValue != nil { + f3elemf6f1elem.SetMinValue(*f3elemf6f1iter.MinValue) } - if f4elemf6f1iter.Name != nil { - f4elemf6f1elem.SetName(*f4elemf6f1iter.Name) + if f3elemf6f1iter.Name != nil { + f3elemf6f1elem.SetName(*f3elemf6f1iter.Name) } - if f4elemf6f1iter.ScalingType != nil { - f4elemf6f1elem.SetScalingType(*f4elemf6f1iter.ScalingType) + if f3elemf6f1iter.ScalingType != nil { + f3elemf6f1elem.SetScalingType(*f3elemf6f1iter.ScalingType) } - f4elemf6f1 = append(f4elemf6f1, f4elemf6f1elem) + f3elemf6f1 = append(f3elemf6f1, f3elemf6f1elem) } - f4elemf6.SetContinuousParameterRanges(f4elemf6f1) + f3elemf6.SetContinuousParameterRanges(f3elemf6f1) } - if f4iter.HyperParameterRanges.IntegerParameterRanges != nil { - f4elemf6f2 := []*svcsdk.IntegerParameterRange{} - for _, f4elemf6f2iter := range f4iter.HyperParameterRanges.IntegerParameterRanges { - f4elemf6f2elem := &svcsdk.IntegerParameterRange{} - if f4elemf6f2iter.MaxValue != nil { - f4elemf6f2elem.SetMaxValue(*f4elemf6f2iter.MaxValue) + if f3iter.HyperParameterRanges.IntegerParameterRanges != nil { + f3elemf6f2 := []*svcsdk.IntegerParameterRange{} + for _, f3elemf6f2iter := range f3iter.HyperParameterRanges.IntegerParameterRanges { + f3elemf6f2elem := &svcsdk.IntegerParameterRange{} + if f3elemf6f2iter.MaxValue != nil { + f3elemf6f2elem.SetMaxValue(*f3elemf6f2iter.MaxValue) } - if f4elemf6f2iter.MinValue != nil { - f4elemf6f2elem.SetMinValue(*f4elemf6f2iter.MinValue) + if f3elemf6f2iter.MinValue != nil { + f3elemf6f2elem.SetMinValue(*f3elemf6f2iter.MinValue) } - if f4elemf6f2iter.Name != nil { - f4elemf6f2elem.SetName(*f4elemf6f2iter.Name) + if f3elemf6f2iter.Name != nil { + f3elemf6f2elem.SetName(*f3elemf6f2iter.Name) } - if f4elemf6f2iter.ScalingType != nil { - f4elemf6f2elem.SetScalingType(*f4elemf6f2iter.ScalingType) + if f3elemf6f2iter.ScalingType != nil { + f3elemf6f2elem.SetScalingType(*f3elemf6f2iter.ScalingType) } - f4elemf6f2 = append(f4elemf6f2, f4elemf6f2elem) + f3elemf6f2 = append(f3elemf6f2, f3elemf6f2elem) } - f4elemf6.SetIntegerParameterRanges(f4elemf6f2) + f3elemf6.SetIntegerParameterRanges(f3elemf6f2) } - f4elem.SetHyperParameterRanges(f4elemf6) + f3elem.SetHyperParameterRanges(f3elemf6) } - if f4iter.InputDataConfig != nil { - f4elemf7 := []*svcsdk.Channel{} - for _, f4elemf7iter := range f4iter.InputDataConfig { - 
f4elemf7elem := &svcsdk.Channel{} - if f4elemf7iter.ChannelName != nil { - f4elemf7elem.SetChannelName(*f4elemf7iter.ChannelName) + if f3iter.InputDataConfig != nil { + f3elemf7 := []*svcsdk.Channel{} + for _, f3elemf7iter := range f3iter.InputDataConfig { + f3elemf7elem := &svcsdk.Channel{} + if f3elemf7iter.ChannelName != nil { + f3elemf7elem.SetChannelName(*f3elemf7iter.ChannelName) } - if f4elemf7iter.CompressionType != nil { - f4elemf7elem.SetCompressionType(*f4elemf7iter.CompressionType) + if f3elemf7iter.CompressionType != nil { + f3elemf7elem.SetCompressionType(*f3elemf7iter.CompressionType) } - if f4elemf7iter.ContentType != nil { - f4elemf7elem.SetContentType(*f4elemf7iter.ContentType) + if f3elemf7iter.ContentType != nil { + f3elemf7elem.SetContentType(*f3elemf7iter.ContentType) } - if f4elemf7iter.DataSource != nil { - f4elemf7elemf3 := &svcsdk.DataSource{} - if f4elemf7iter.DataSource.FileSystemDataSource != nil { - f4elemf7elemf3f0 := &svcsdk.FileSystemDataSource{} - if f4elemf7iter.DataSource.FileSystemDataSource.DirectoryPath != nil { - f4elemf7elemf3f0.SetDirectoryPath(*f4elemf7iter.DataSource.FileSystemDataSource.DirectoryPath) + if f3elemf7iter.DataSource != nil { + f3elemf7elemf3 := &svcsdk.DataSource{} + if f3elemf7iter.DataSource.FileSystemDataSource != nil { + f3elemf7elemf3f0 := &svcsdk.FileSystemDataSource{} + if f3elemf7iter.DataSource.FileSystemDataSource.DirectoryPath != nil { + f3elemf7elemf3f0.SetDirectoryPath(*f3elemf7iter.DataSource.FileSystemDataSource.DirectoryPath) } - if f4elemf7iter.DataSource.FileSystemDataSource.FileSystemAccessMode != nil { - f4elemf7elemf3f0.SetFileSystemAccessMode(*f4elemf7iter.DataSource.FileSystemDataSource.FileSystemAccessMode) + if f3elemf7iter.DataSource.FileSystemDataSource.FileSystemAccessMode != nil { + f3elemf7elemf3f0.SetFileSystemAccessMode(*f3elemf7iter.DataSource.FileSystemDataSource.FileSystemAccessMode) } - if f4elemf7iter.DataSource.FileSystemDataSource.FileSystemID != nil { - f4elemf7elemf3f0.SetFileSystemId(*f4elemf7iter.DataSource.FileSystemDataSource.FileSystemID) + if f3elemf7iter.DataSource.FileSystemDataSource.FileSystemID != nil { + f3elemf7elemf3f0.SetFileSystemId(*f3elemf7iter.DataSource.FileSystemDataSource.FileSystemID) } - if f4elemf7iter.DataSource.FileSystemDataSource.FileSystemType != nil { - f4elemf7elemf3f0.SetFileSystemType(*f4elemf7iter.DataSource.FileSystemDataSource.FileSystemType) + if f3elemf7iter.DataSource.FileSystemDataSource.FileSystemType != nil { + f3elemf7elemf3f0.SetFileSystemType(*f3elemf7iter.DataSource.FileSystemDataSource.FileSystemType) } - f4elemf7elemf3.SetFileSystemDataSource(f4elemf7elemf3f0) + f3elemf7elemf3.SetFileSystemDataSource(f3elemf7elemf3f0) } - if f4elemf7iter.DataSource.S3DataSource != nil { - f4elemf7elemf3f1 := &svcsdk.S3DataSource{} - if f4elemf7iter.DataSource.S3DataSource.AttributeNames != nil { - f4elemf7elemf3f1f0 := []*string{} - for _, f4elemf7elemf3f1f0iter := range f4elemf7iter.DataSource.S3DataSource.AttributeNames { - var f4elemf7elemf3f1f0elem string - f4elemf7elemf3f1f0elem = *f4elemf7elemf3f1f0iter - f4elemf7elemf3f1f0 = append(f4elemf7elemf3f1f0, &f4elemf7elemf3f1f0elem) + if f3elemf7iter.DataSource.S3DataSource != nil { + f3elemf7elemf3f1 := &svcsdk.S3DataSource{} + if f3elemf7iter.DataSource.S3DataSource.AttributeNames != nil { + f3elemf7elemf3f1f0 := []*string{} + for _, f3elemf7elemf3f1f0iter := range f3elemf7iter.DataSource.S3DataSource.AttributeNames { + var f3elemf7elemf3f1f0elem string + f3elemf7elemf3f1f0elem = *f3elemf7elemf3f1f0iter + 
f3elemf7elemf3f1f0 = append(f3elemf7elemf3f1f0, &f3elemf7elemf3f1f0elem) } - f4elemf7elemf3f1.SetAttributeNames(f4elemf7elemf3f1f0) + f3elemf7elemf3f1.SetAttributeNames(f3elemf7elemf3f1f0) } - if f4elemf7iter.DataSource.S3DataSource.S3DataDistributionType != nil { - f4elemf7elemf3f1.SetS3DataDistributionType(*f4elemf7iter.DataSource.S3DataSource.S3DataDistributionType) + if f3elemf7iter.DataSource.S3DataSource.S3DataDistributionType != nil { + f3elemf7elemf3f1.SetS3DataDistributionType(*f3elemf7iter.DataSource.S3DataSource.S3DataDistributionType) } - if f4elemf7iter.DataSource.S3DataSource.S3DataType != nil { - f4elemf7elemf3f1.SetS3DataType(*f4elemf7iter.DataSource.S3DataSource.S3DataType) + if f3elemf7iter.DataSource.S3DataSource.S3DataType != nil { + f3elemf7elemf3f1.SetS3DataType(*f3elemf7iter.DataSource.S3DataSource.S3DataType) } - if f4elemf7iter.DataSource.S3DataSource.S3URI != nil { - f4elemf7elemf3f1.SetS3Uri(*f4elemf7iter.DataSource.S3DataSource.S3URI) + if f3elemf7iter.DataSource.S3DataSource.S3URI != nil { + f3elemf7elemf3f1.SetS3Uri(*f3elemf7iter.DataSource.S3DataSource.S3URI) } - f4elemf7elemf3.SetS3DataSource(f4elemf7elemf3f1) + f3elemf7elemf3.SetS3DataSource(f3elemf7elemf3f1) } - f4elemf7elem.SetDataSource(f4elemf7elemf3) + f3elemf7elem.SetDataSource(f3elemf7elemf3) } - if f4elemf7iter.InputMode != nil { - f4elemf7elem.SetInputMode(*f4elemf7iter.InputMode) + if f3elemf7iter.InputMode != nil { + f3elemf7elem.SetInputMode(*f3elemf7iter.InputMode) } - if f4elemf7iter.RecordWrapperType != nil { - f4elemf7elem.SetRecordWrapperType(*f4elemf7iter.RecordWrapperType) + if f3elemf7iter.RecordWrapperType != nil { + f3elemf7elem.SetRecordWrapperType(*f3elemf7iter.RecordWrapperType) } - if f4elemf7iter.ShuffleConfig != nil { - f4elemf7elemf6 := &svcsdk.ShuffleConfig{} - if f4elemf7iter.ShuffleConfig.Seed != nil { - f4elemf7elemf6.SetSeed(*f4elemf7iter.ShuffleConfig.Seed) + if f3elemf7iter.ShuffleConfig != nil { + f3elemf7elemf6 := &svcsdk.ShuffleConfig{} + if f3elemf7iter.ShuffleConfig.Seed != nil { + f3elemf7elemf6.SetSeed(*f3elemf7iter.ShuffleConfig.Seed) } - f4elemf7elem.SetShuffleConfig(f4elemf7elemf6) + f3elemf7elem.SetShuffleConfig(f3elemf7elemf6) } - f4elemf7 = append(f4elemf7, f4elemf7elem) + f3elemf7 = append(f3elemf7, f3elemf7elem) } - f4elem.SetInputDataConfig(f4elemf7) + f3elem.SetInputDataConfig(f3elemf7) } - if f4iter.OutputDataConfig != nil { - f4elemf8 := &svcsdk.OutputDataConfig{} - if f4iter.OutputDataConfig.KMSKeyID != nil { - f4elemf8.SetKmsKeyId(*f4iter.OutputDataConfig.KMSKeyID) + if f3iter.OutputDataConfig != nil { + f3elemf8 := &svcsdk.OutputDataConfig{} + if f3iter.OutputDataConfig.KMSKeyID != nil { + f3elemf8.SetKmsKeyId(*f3iter.OutputDataConfig.KMSKeyID) } - if f4iter.OutputDataConfig.S3OutputPath != nil { - f4elemf8.SetS3OutputPath(*f4iter.OutputDataConfig.S3OutputPath) + if f3iter.OutputDataConfig.S3OutputPath != nil { + f3elemf8.SetS3OutputPath(*f3iter.OutputDataConfig.S3OutputPath) } - f4elem.SetOutputDataConfig(f4elemf8) + f3elem.SetOutputDataConfig(f3elemf8) } - if f4iter.ResourceConfig != nil { - f4elemf9 := &svcsdk.ResourceConfig{} - if f4iter.ResourceConfig.InstanceCount != nil { - f4elemf9.SetInstanceCount(*f4iter.ResourceConfig.InstanceCount) + if f3iter.ResourceConfig != nil { + f3elemf9 := &svcsdk.ResourceConfig{} + if f3iter.ResourceConfig.InstanceCount != nil { + f3elemf9.SetInstanceCount(*f3iter.ResourceConfig.InstanceCount) } - if f4iter.ResourceConfig.InstanceType != nil { - f4elemf9.SetInstanceType(*f4iter.ResourceConfig.InstanceType) + if 
f3iter.ResourceConfig.InstanceType != nil { + f3elemf9.SetInstanceType(*f3iter.ResourceConfig.InstanceType) } - if f4iter.ResourceConfig.VolumeKMSKeyID != nil { - f4elemf9.SetVolumeKmsKeyId(*f4iter.ResourceConfig.VolumeKMSKeyID) + if f3iter.ResourceConfig.VolumeKMSKeyID != nil { + f3elemf9.SetVolumeKmsKeyId(*f3iter.ResourceConfig.VolumeKMSKeyID) } - if f4iter.ResourceConfig.VolumeSizeInGB != nil { - f4elemf9.SetVolumeSizeInGB(*f4iter.ResourceConfig.VolumeSizeInGB) + if f3iter.ResourceConfig.VolumeSizeInGB != nil { + f3elemf9.SetVolumeSizeInGB(*f3iter.ResourceConfig.VolumeSizeInGB) } - f4elem.SetResourceConfig(f4elemf9) + f3elem.SetResourceConfig(f3elemf9) } - if f4iter.RoleARN != nil { - f4elem.SetRoleArn(*f4iter.RoleARN) + if f3iter.RoleARN != nil { + f3elem.SetRoleArn(*f3iter.RoleARN) } - if f4iter.StaticHyperParameters != nil { - f4elemf11 := map[string]*string{} - for f4elemf11key, f4elemf11valiter := range f4iter.StaticHyperParameters { - var f4elemf11val string - f4elemf11val = *f4elemf11valiter - f4elemf11[f4elemf11key] = &f4elemf11val + if f3iter.StaticHyperParameters != nil { + f3elemf11 := map[string]*string{} + for f3elemf11key, f3elemf11valiter := range f3iter.StaticHyperParameters { + var f3elemf11val string + f3elemf11val = *f3elemf11valiter + f3elemf11[f3elemf11key] = &f3elemf11val } - f4elem.SetStaticHyperParameters(f4elemf11) + f3elem.SetStaticHyperParameters(f3elemf11) } - if f4iter.StoppingCondition != nil { - f4elemf12 := &svcsdk.StoppingCondition{} - if f4iter.StoppingCondition.MaxRuntimeInSeconds != nil { - f4elemf12.SetMaxRuntimeInSeconds(*f4iter.StoppingCondition.MaxRuntimeInSeconds) + if f3iter.StoppingCondition != nil { + f3elemf12 := &svcsdk.StoppingCondition{} + if f3iter.StoppingCondition.MaxRuntimeInSeconds != nil { + f3elemf12.SetMaxRuntimeInSeconds(*f3iter.StoppingCondition.MaxRuntimeInSeconds) } - if f4iter.StoppingCondition.MaxWaitTimeInSeconds != nil { - f4elemf12.SetMaxWaitTimeInSeconds(*f4iter.StoppingCondition.MaxWaitTimeInSeconds) + if f3iter.StoppingCondition.MaxWaitTimeInSeconds != nil { + f3elemf12.SetMaxWaitTimeInSeconds(*f3iter.StoppingCondition.MaxWaitTimeInSeconds) } - f4elem.SetStoppingCondition(f4elemf12) + f3elem.SetStoppingCondition(f3elemf12) } - if f4iter.TuningObjective != nil { - f4elemf13 := &svcsdk.HyperParameterTuningJobObjective{} - if f4iter.TuningObjective.MetricName != nil { - f4elemf13.SetMetricName(*f4iter.TuningObjective.MetricName) + if f3iter.TuningObjective != nil { + f3elemf13 := &svcsdk.HyperParameterTuningJobObjective{} + if f3iter.TuningObjective.MetricName != nil { + f3elemf13.SetMetricName(*f3iter.TuningObjective.MetricName) } - if f4iter.TuningObjective.Type != nil { - f4elemf13.SetType(*f4iter.TuningObjective.Type) + if f3iter.TuningObjective.Type != nil { + f3elemf13.SetType(*f3iter.TuningObjective.Type) } - f4elem.SetTuningObjective(f4elemf13) + f3elem.SetTuningObjective(f3elemf13) } - if f4iter.VPCConfig != nil { - f4elemf14 := &svcsdk.VpcConfig{} - if f4iter.VPCConfig.SecurityGroupIDs != nil { - f4elemf14f0 := []*string{} - for _, f4elemf14f0iter := range f4iter.VPCConfig.SecurityGroupIDs { - var f4elemf14f0elem string - f4elemf14f0elem = *f4elemf14f0iter - f4elemf14f0 = append(f4elemf14f0, &f4elemf14f0elem) + if f3iter.VPCConfig != nil { + f3elemf14 := &svcsdk.VpcConfig{} + if f3iter.VPCConfig.SecurityGroupIDs != nil { + f3elemf14f0 := []*string{} + for _, f3elemf14f0iter := range f3iter.VPCConfig.SecurityGroupIDs { + var f3elemf14f0elem string + f3elemf14f0elem = *f3elemf14f0iter + f3elemf14f0 = 
append(f3elemf14f0, &f3elemf14f0elem) } - f4elemf14.SetSecurityGroupIds(f4elemf14f0) + f3elemf14.SetSecurityGroupIds(f3elemf14f0) } - if f4iter.VPCConfig.Subnets != nil { - f4elemf14f1 := []*string{} - for _, f4elemf14f1iter := range f4iter.VPCConfig.Subnets { - var f4elemf14f1elem string - f4elemf14f1elem = *f4elemf14f1iter - f4elemf14f1 = append(f4elemf14f1, &f4elemf14f1elem) + if f3iter.VPCConfig.Subnets != nil { + f3elemf14f1 := []*string{} + for _, f3elemf14f1iter := range f3iter.VPCConfig.Subnets { + var f3elemf14f1elem string + f3elemf14f1elem = *f3elemf14f1iter + f3elemf14f1 = append(f3elemf14f1, &f3elemf14f1elem) } - f4elemf14.SetSubnets(f4elemf14f1) + f3elemf14.SetSubnets(f3elemf14f1) } - f4elem.SetVpcConfig(f4elemf14) + f3elem.SetVpcConfig(f3elemf14) } - f4 = append(f4, f4elem) + f3 = append(f3, f3elem) } - res.SetTrainingJobDefinitions(f4) + res.SetTrainingJobDefinitions(f3) } if r.ko.Spec.WarmStartConfig != nil { - f5 := &svcsdk.HyperParameterTuningJobWarmStartConfig{} + f4 := &svcsdk.HyperParameterTuningJobWarmStartConfig{} if r.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs != nil { - f5f0 := []*svcsdk.ParentHyperParameterTuningJob{} - for _, f5f0iter := range r.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs { - f5f0elem := &svcsdk.ParentHyperParameterTuningJob{} - if f5f0iter.HyperParameterTuningJobName != nil { - f5f0elem.SetHyperParameterTuningJobName(*f5f0iter.HyperParameterTuningJobName) + f4f0 := []*svcsdk.ParentHyperParameterTuningJob{} + for _, f4f0iter := range r.ko.Spec.WarmStartConfig.ParentHyperParameterTuningJobs { + f4f0elem := &svcsdk.ParentHyperParameterTuningJob{} + if f4f0iter.HyperParameterTuningJobName != nil { + f4f0elem.SetHyperParameterTuningJobName(*f4f0iter.HyperParameterTuningJobName) } - f5f0 = append(f5f0, f5f0elem) + f4f0 = append(f4f0, f4f0elem) } - f5.SetParentHyperParameterTuningJobs(f5f0) + f4.SetParentHyperParameterTuningJobs(f4f0) } if r.ko.Spec.WarmStartConfig.WarmStartType != nil { - f5.SetWarmStartType(*r.ko.Spec.WarmStartConfig.WarmStartType) + f4.SetWarmStartType(*r.ko.Spec.WarmStartConfig.WarmStartType) } - res.SetWarmStartConfig(f5) + res.SetWarmStartConfig(f4) } return res, nil @@ -1619,7 +1623,7 @@ func (rm *resourceManager) sdkUpdate( ctx context.Context, desired *resource, latest *resource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (*resource, error) { // TODO(jaypipes): Figure this out... return nil, ackerr.NotImplemented diff --git a/pkg/resource/model/delta.go b/pkg/resource/model/delta.go new file mode 100644 index 00000000..b3e411c7 --- /dev/null +++ b/pkg/resource/model/delta.go @@ -0,0 +1,149 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
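The generated delta files introduced in this change all repeat a single per-field pattern built on the runtime's compare helpers. The following is a minimal hand-written sketch of that pattern, assuming HasNilDifference reports a difference when exactly one side is nil (which matches how the generated code below uses it); the helper, field path, and values in the sketch are illustrative only.

package main

import "fmt"

// hasNilDifference mirrors the assumed behaviour of ackcompare.HasNilDifference
// for a pair of string pointers: it reports true when exactly one side is nil.
func hasNilDifference(a, b *string) bool {
    return (a == nil) != (b == nil)
}

// fieldDelta shows the per-field pattern the generated newResourceDelta repeats
// for every Spec member: record a difference when the nil-ness differs,
// otherwise compare the dereferenced values.
func fieldDelta(path string, a, b *string, diffs *[]string) {
    if hasNilDifference(a, b) {
        *diffs = append(*diffs, path)
    } else if a != nil && b != nil && *a != *b {
        *diffs = append(*diffs, path)
    }
}

func main() {
    role := "arn:aws:iam::111122223333:role/example" // hypothetical value
    var diffs []string
    fieldDelta("Spec.ExecutionRoleARN", &role, nil, &diffs)
    fmt.Println(diffs) // [Spec.ExecutionRoleARN]
}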
+ +package model + +import ( + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.EnableNetworkIsolation, b.ko.Spec.EnableNetworkIsolation) { + delta.Add("Spec.EnableNetworkIsolation", a.ko.Spec.EnableNetworkIsolation, b.ko.Spec.EnableNetworkIsolation) + } else if a.ko.Spec.EnableNetworkIsolation != nil && b.ko.Spec.EnableNetworkIsolation != nil { + if *a.ko.Spec.EnableNetworkIsolation != *b.ko.Spec.EnableNetworkIsolation { + delta.Add("Spec.EnableNetworkIsolation", a.ko.Spec.EnableNetworkIsolation, b.ko.Spec.EnableNetworkIsolation) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExecutionRoleARN, b.ko.Spec.ExecutionRoleARN) { + delta.Add("Spec.ExecutionRoleARN", a.ko.Spec.ExecutionRoleARN, b.ko.Spec.ExecutionRoleARN) + } else if a.ko.Spec.ExecutionRoleARN != nil && b.ko.Spec.ExecutionRoleARN != nil { + if *a.ko.Spec.ExecutionRoleARN != *b.ko.Spec.ExecutionRoleARN { + delta.Add("Spec.ExecutionRoleARN", a.ko.Spec.ExecutionRoleARN, b.ko.Spec.ExecutionRoleARN) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.InferenceExecutionConfig, b.ko.Spec.InferenceExecutionConfig) { + delta.Add("Spec.InferenceExecutionConfig", a.ko.Spec.InferenceExecutionConfig, b.ko.Spec.InferenceExecutionConfig) + } else if a.ko.Spec.InferenceExecutionConfig != nil && b.ko.Spec.InferenceExecutionConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.InferenceExecutionConfig.Mode, b.ko.Spec.InferenceExecutionConfig.Mode) { + delta.Add("Spec.InferenceExecutionConfig.Mode", a.ko.Spec.InferenceExecutionConfig.Mode, b.ko.Spec.InferenceExecutionConfig.Mode) + } else if a.ko.Spec.InferenceExecutionConfig.Mode != nil && b.ko.Spec.InferenceExecutionConfig.Mode != nil { + if *a.ko.Spec.InferenceExecutionConfig.Mode != *b.ko.Spec.InferenceExecutionConfig.Mode { + delta.Add("Spec.InferenceExecutionConfig.Mode", a.ko.Spec.InferenceExecutionConfig.Mode, b.ko.Spec.InferenceExecutionConfig.Mode) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ModelName, b.ko.Spec.ModelName) { + delta.Add("Spec.ModelName", a.ko.Spec.ModelName, b.ko.Spec.ModelName) + } else if a.ko.Spec.ModelName != nil && b.ko.Spec.ModelName != nil { + if *a.ko.Spec.ModelName != *b.ko.Spec.ModelName { + delta.Add("Spec.ModelName", a.ko.Spec.ModelName, b.ko.Spec.ModelName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer, b.ko.Spec.PrimaryContainer) { + delta.Add("Spec.PrimaryContainer", a.ko.Spec.PrimaryContainer, b.ko.Spec.PrimaryContainer) + } else if a.ko.Spec.PrimaryContainer != nil && b.ko.Spec.PrimaryContainer != nil { + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.ContainerHostname, b.ko.Spec.PrimaryContainer.ContainerHostname) { + delta.Add("Spec.PrimaryContainer.ContainerHostname", a.ko.Spec.PrimaryContainer.ContainerHostname, b.ko.Spec.PrimaryContainer.ContainerHostname) + } else if a.ko.Spec.PrimaryContainer.ContainerHostname != nil && b.ko.Spec.PrimaryContainer.ContainerHostname != nil { + if *a.ko.Spec.PrimaryContainer.ContainerHostname != *b.ko.Spec.PrimaryContainer.ContainerHostname { + delta.Add("Spec.PrimaryContainer.ContainerHostname", a.ko.Spec.PrimaryContainer.ContainerHostname, b.ko.Spec.PrimaryContainer.ContainerHostname) + } + } + if 
ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.Environment, b.ko.Spec.PrimaryContainer.Environment) { + delta.Add("Spec.PrimaryContainer.Environment", a.ko.Spec.PrimaryContainer.Environment, b.ko.Spec.PrimaryContainer.Environment) + } else if a.ko.Spec.PrimaryContainer.Environment != nil && b.ko.Spec.PrimaryContainer.Environment != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.PrimaryContainer.Environment, b.ko.Spec.PrimaryContainer.Environment) { + delta.Add("Spec.PrimaryContainer.Environment", a.ko.Spec.PrimaryContainer.Environment, b.ko.Spec.PrimaryContainer.Environment) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.Image, b.ko.Spec.PrimaryContainer.Image) { + delta.Add("Spec.PrimaryContainer.Image", a.ko.Spec.PrimaryContainer.Image, b.ko.Spec.PrimaryContainer.Image) + } else if a.ko.Spec.PrimaryContainer.Image != nil && b.ko.Spec.PrimaryContainer.Image != nil { + if *a.ko.Spec.PrimaryContainer.Image != *b.ko.Spec.PrimaryContainer.Image { + delta.Add("Spec.PrimaryContainer.Image", a.ko.Spec.PrimaryContainer.Image, b.ko.Spec.PrimaryContainer.Image) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.ImageConfig, b.ko.Spec.PrimaryContainer.ImageConfig) { + delta.Add("Spec.PrimaryContainer.ImageConfig", a.ko.Spec.PrimaryContainer.ImageConfig, b.ko.Spec.PrimaryContainer.ImageConfig) + } else if a.ko.Spec.PrimaryContainer.ImageConfig != nil && b.ko.Spec.PrimaryContainer.ImageConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode, b.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode) { + delta.Add("Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode", a.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode, b.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode) + } else if a.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode != nil && b.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode != nil { + if *a.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode != *b.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode { + delta.Add("Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode", a.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode, b.ko.Spec.PrimaryContainer.ImageConfig.RepositoryAccessMode) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.Mode, b.ko.Spec.PrimaryContainer.Mode) { + delta.Add("Spec.PrimaryContainer.Mode", a.ko.Spec.PrimaryContainer.Mode, b.ko.Spec.PrimaryContainer.Mode) + } else if a.ko.Spec.PrimaryContainer.Mode != nil && b.ko.Spec.PrimaryContainer.Mode != nil { + if *a.ko.Spec.PrimaryContainer.Mode != *b.ko.Spec.PrimaryContainer.Mode { + delta.Add("Spec.PrimaryContainer.Mode", a.ko.Spec.PrimaryContainer.Mode, b.ko.Spec.PrimaryContainer.Mode) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.ModelDataURL, b.ko.Spec.PrimaryContainer.ModelDataURL) { + delta.Add("Spec.PrimaryContainer.ModelDataURL", a.ko.Spec.PrimaryContainer.ModelDataURL, b.ko.Spec.PrimaryContainer.ModelDataURL) + } else if a.ko.Spec.PrimaryContainer.ModelDataURL != nil && b.ko.Spec.PrimaryContainer.ModelDataURL != nil { + if *a.ko.Spec.PrimaryContainer.ModelDataURL != *b.ko.Spec.PrimaryContainer.ModelDataURL { + delta.Add("Spec.PrimaryContainer.ModelDataURL", a.ko.Spec.PrimaryContainer.ModelDataURL, b.ko.Spec.PrimaryContainer.ModelDataURL) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.ModelPackageName, b.ko.Spec.PrimaryContainer.ModelPackageName) { + 
delta.Add("Spec.PrimaryContainer.ModelPackageName", a.ko.Spec.PrimaryContainer.ModelPackageName, b.ko.Spec.PrimaryContainer.ModelPackageName) + } else if a.ko.Spec.PrimaryContainer.ModelPackageName != nil && b.ko.Spec.PrimaryContainer.ModelPackageName != nil { + if *a.ko.Spec.PrimaryContainer.ModelPackageName != *b.ko.Spec.PrimaryContainer.ModelPackageName { + delta.Add("Spec.PrimaryContainer.ModelPackageName", a.ko.Spec.PrimaryContainer.ModelPackageName, b.ko.Spec.PrimaryContainer.ModelPackageName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.MultiModelConfig, b.ko.Spec.PrimaryContainer.MultiModelConfig) { + delta.Add("Spec.PrimaryContainer.MultiModelConfig", a.ko.Spec.PrimaryContainer.MultiModelConfig, b.ko.Spec.PrimaryContainer.MultiModelConfig) + } else if a.ko.Spec.PrimaryContainer.MultiModelConfig != nil && b.ko.Spec.PrimaryContainer.MultiModelConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting, b.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting) { + delta.Add("Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting", a.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting, b.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting) + } else if a.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting != nil && b.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting != nil { + if *a.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting != *b.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting { + delta.Add("Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting", a.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting, b.ko.Spec.PrimaryContainer.MultiModelConfig.ModelCacheSetting) + } + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.VPCConfig, b.ko.Spec.VPCConfig) { + delta.Add("Spec.VPCConfig", a.ko.Spec.VPCConfig, b.ko.Spec.VPCConfig) + } else if a.ko.Spec.VPCConfig != nil && b.ko.Spec.VPCConfig != nil { + + if !ackcompare.SliceStringPEqual(a.ko.Spec.VPCConfig.SecurityGroupIDs, b.ko.Spec.VPCConfig.SecurityGroupIDs) { + delta.Add("Spec.VPCConfig.SecurityGroupIDs", a.ko.Spec.VPCConfig.SecurityGroupIDs, b.ko.Spec.VPCConfig.SecurityGroupIDs) + } + + if !ackcompare.SliceStringPEqual(a.ko.Spec.VPCConfig.Subnets, b.ko.Spec.VPCConfig.Subnets) { + delta.Add("Spec.VPCConfig.Subnets", a.ko.Spec.VPCConfig.Subnets, b.ko.Spec.VPCConfig.Subnets) + } + } + + return delta +} diff --git a/pkg/resource/model/descriptor.go b/pkg/resource/model/descriptor.go index 57949ce8..d27c82f3 100644 --- a/pkg/resource/model/descriptor.go +++ b/pkg/resource/model/descriptor.go @@ -18,8 +18,6 @@ package model import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sapirt "k8s.io/apimachinery/pkg/runtime" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -65,37 +63,10 @@ func (d *resourceDescriptor) ResourceFromRuntimeObject( } } -// Equal returns true if the two supplied AWSResources have the same content. -// The underlying types of the two supplied AWSResources should be the same. 
In -// other words, the Equal() method should be called with the same concrete -// implementing AWSResource type -func (d *resourceDescriptor) Equal( - a acktypes.AWSResource, - b acktypes.AWSResource, -) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - return cmp.Equal(ac.ko, bc.ko, opts...) -} - -// Diff returns a Reporter which provides the difference between two supplied -// AWSResources. The underlying types of the two supplied AWSResources should -// be the same. In other words, the Diff() method should be called with the -// same concrete implementing AWSResource type -func (d *resourceDescriptor) Diff( - a acktypes.AWSResource, - b acktypes.AWSResource, -) *ackcompare.Reporter { - ac := a.(*resource) - bc := b.(*resource) - var diffReporter ackcompare.Reporter - opts := []cmp.Option{ - cmp.Reporter(&diffReporter), - cmp.AllowUnexported(svcapitypes.Model{}), - } - cmp.Equal(ac.ko, bc.ko, opts...) - return &diffReporter +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) } // UpdateCRStatus accepts an AWSResource object and changes the Status diff --git a/pkg/resource/model/manager.go b/pkg/resource/model/manager.go index ce81a551..67c94633 100644 --- a/pkg/resource/model/manager.go +++ b/pkg/resource/model/manager.go @@ -125,7 +125,7 @@ func (rm *resourceManager) Update( ctx context.Context, resDesired acktypes.AWSResource, resLatest acktypes.AWSResource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (acktypes.AWSResource, error) { desired := rm.concreteResource(resDesired) latest := rm.concreteResource(resLatest) @@ -133,7 +133,7 @@ func (rm *resourceManager) Update( // Should never happen... if it does, it's buggy code. 
panic("resource manager's Update() method received resource with nil CR object") } - updated, err := rm.sdkUpdate(ctx, desired, latest, diffReporter) + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) if err != nil { return rm.onError(latest, err) } diff --git a/pkg/resource/model/resource.go b/pkg/resource/model/resource.go index ac7fb1d5..7b632387 100644 --- a/pkg/resource/model/resource.go +++ b/pkg/resource/model/resource.go @@ -24,7 +24,7 @@ import ( svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1" ) -// resource implements the `aws-service-operator-k8s/pkg/types.AWSResource` +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` // interface type resource struct { // The Kubernetes-native CR representing the resource diff --git a/pkg/resource/model/sdk.go b/pkg/resource/model/sdk.go index 6b1dec13..c0ebc255 100644 --- a/pkg/resource/model/sdk.go +++ b/pkg/resource/model/sdk.go @@ -116,12 +116,18 @@ func (rm *resourceManager) sdkFind( f0 = append(f0, f0elem) } ko.Spec.Containers = f0 + } else { + ko.Spec.Containers = nil } if resp.EnableNetworkIsolation != nil { ko.Spec.EnableNetworkIsolation = resp.EnableNetworkIsolation + } else { + ko.Spec.EnableNetworkIsolation = nil } if resp.ExecutionRoleArn != nil { ko.Spec.ExecutionRoleARN = resp.ExecutionRoleArn + } else { + ko.Spec.ExecutionRoleARN = nil } if resp.InferenceExecutionConfig != nil { f4 := &svcapitypes.InferenceExecutionConfig{} @@ -129,6 +135,8 @@ func (rm *resourceManager) sdkFind( f4.Mode = resp.InferenceExecutionConfig.Mode } ko.Spec.InferenceExecutionConfig = f4 + } else { + ko.Spec.InferenceExecutionConfig = nil } if ko.Status.ACKResourceMetadata == nil { ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} @@ -139,6 +147,8 @@ func (rm *resourceManager) sdkFind( } if resp.ModelName != nil { ko.Spec.ModelName = resp.ModelName + } else { + ko.Spec.ModelName = nil } if resp.PrimaryContainer != nil { f7 := &svcapitypes.ContainerDefinition{} @@ -181,6 +191,8 @@ func (rm *resourceManager) sdkFind( f7.MultiModelConfig = f7f7 } ko.Spec.PrimaryContainer = f7 + } else { + ko.Spec.PrimaryContainer = nil } if resp.VpcConfig != nil { f8 := &svcapitypes.VPCConfig{} @@ -203,6 +215,8 @@ func (rm *resourceManager) sdkFind( f8.Subnets = f8f1 } ko.Spec.VPCConfig = f8 + } else { + ko.Spec.VPCConfig = nil } rm.setStatusDefaults(ko) @@ -379,41 +393,27 @@ func (rm *resourceManager) newCreateRequestPayload( } res.SetPrimaryContainer(f5) } - if r.ko.Spec.Tags != nil { - f6 := []*svcsdk.Tag{} - for _, f6iter := range r.ko.Spec.Tags { - f6elem := &svcsdk.Tag{} - if f6iter.Key != nil { - f6elem.SetKey(*f6iter.Key) - } - if f6iter.Value != nil { - f6elem.SetValue(*f6iter.Value) - } - f6 = append(f6, f6elem) - } - res.SetTags(f6) - } if r.ko.Spec.VPCConfig != nil { - f7 := &svcsdk.VpcConfig{} + f6 := &svcsdk.VpcConfig{} if r.ko.Spec.VPCConfig.SecurityGroupIDs != nil { - f7f0 := []*string{} - for _, f7f0iter := range r.ko.Spec.VPCConfig.SecurityGroupIDs { - var f7f0elem string - f7f0elem = *f7f0iter - f7f0 = append(f7f0, &f7f0elem) + f6f0 := []*string{} + for _, f6f0iter := range r.ko.Spec.VPCConfig.SecurityGroupIDs { + var f6f0elem string + f6f0elem = *f6f0iter + f6f0 = append(f6f0, &f6f0elem) } - f7.SetSecurityGroupIds(f7f0) + f6.SetSecurityGroupIds(f6f0) } if r.ko.Spec.VPCConfig.Subnets != nil { - f7f1 := []*string{} - for _, f7f1iter := range r.ko.Spec.VPCConfig.Subnets { - var f7f1elem string - f7f1elem = *f7f1iter - f7f1 = append(f7f1, &f7f1elem) + f6f1 := []*string{} + 
for _, f6f1iter := range r.ko.Spec.VPCConfig.Subnets { + var f6f1elem string + f6f1elem = *f6f1iter + f6f1 = append(f6f1, &f6f1elem) } - f7.SetSubnets(f7f1) + f6.SetSubnets(f6f1) } - res.SetVpcConfig(f7) + res.SetVpcConfig(f6) } return res, nil @@ -425,7 +425,7 @@ func (rm *resourceManager) sdkUpdate( ctx context.Context, desired *resource, latest *resource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (*resource, error) { // TODO(jaypipes): Figure this out... return nil, ackerr.NotImplemented diff --git a/pkg/resource/processing_job/delta.go b/pkg/resource/processing_job/delta.go new file mode 100644 index 00000000..e2f33a62 --- /dev/null +++ b/pkg/resource/processing_job/delta.go @@ -0,0 +1,192 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. + +package processing_job + +import ( + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.AppSpecification, b.ko.Spec.AppSpecification) { + delta.Add("Spec.AppSpecification", a.ko.Spec.AppSpecification, b.ko.Spec.AppSpecification) + } else if a.ko.Spec.AppSpecification != nil && b.ko.Spec.AppSpecification != nil { + + if !ackcompare.SliceStringPEqual(a.ko.Spec.AppSpecification.ContainerArguments, b.ko.Spec.AppSpecification.ContainerArguments) { + delta.Add("Spec.AppSpecification.ContainerArguments", a.ko.Spec.AppSpecification.ContainerArguments, b.ko.Spec.AppSpecification.ContainerArguments) + } + + if !ackcompare.SliceStringPEqual(a.ko.Spec.AppSpecification.ContainerEntrypoint, b.ko.Spec.AppSpecification.ContainerEntrypoint) { + delta.Add("Spec.AppSpecification.ContainerEntrypoint", a.ko.Spec.AppSpecification.ContainerEntrypoint, b.ko.Spec.AppSpecification.ContainerEntrypoint) + } + if ackcompare.HasNilDifference(a.ko.Spec.AppSpecification.ImageURI, b.ko.Spec.AppSpecification.ImageURI) { + delta.Add("Spec.AppSpecification.ImageURI", a.ko.Spec.AppSpecification.ImageURI, b.ko.Spec.AppSpecification.ImageURI) + } else if a.ko.Spec.AppSpecification.ImageURI != nil && b.ko.Spec.AppSpecification.ImageURI != nil { + if *a.ko.Spec.AppSpecification.ImageURI != *b.ko.Spec.AppSpecification.ImageURI { + delta.Add("Spec.AppSpecification.ImageURI", a.ko.Spec.AppSpecification.ImageURI, b.ko.Spec.AppSpecification.ImageURI) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Environment, b.ko.Spec.Environment) { + delta.Add("Spec.Environment", a.ko.Spec.Environment, b.ko.Spec.Environment) + } else if a.ko.Spec.Environment != nil && b.ko.Spec.Environment != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.Environment, b.ko.Spec.Environment) { + delta.Add("Spec.Environment", a.ko.Spec.Environment, 
b.ko.Spec.Environment) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig, b.ko.Spec.ExperimentConfig) { + delta.Add("Spec.ExperimentConfig", a.ko.Spec.ExperimentConfig, b.ko.Spec.ExperimentConfig) + } else if a.ko.Spec.ExperimentConfig != nil && b.ko.Spec.ExperimentConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) { + delta.Add("Spec.ExperimentConfig.ExperimentName", a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) + } else if a.ko.Spec.ExperimentConfig.ExperimentName != nil && b.ko.Spec.ExperimentConfig.ExperimentName != nil { + if *a.ko.Spec.ExperimentConfig.ExperimentName != *b.ko.Spec.ExperimentConfig.ExperimentName { + delta.Add("Spec.ExperimentConfig.ExperimentName", a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) { + delta.Add("Spec.ExperimentConfig.TrialComponentDisplayName", a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) + } else if a.ko.Spec.ExperimentConfig.TrialComponentDisplayName != nil && b.ko.Spec.ExperimentConfig.TrialComponentDisplayName != nil { + if *a.ko.Spec.ExperimentConfig.TrialComponentDisplayName != *b.ko.Spec.ExperimentConfig.TrialComponentDisplayName { + delta.Add("Spec.ExperimentConfig.TrialComponentDisplayName", a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) { + delta.Add("Spec.ExperimentConfig.TrialName", a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) + } else if a.ko.Spec.ExperimentConfig.TrialName != nil && b.ko.Spec.ExperimentConfig.TrialName != nil { + if *a.ko.Spec.ExperimentConfig.TrialName != *b.ko.Spec.ExperimentConfig.TrialName { + delta.Add("Spec.ExperimentConfig.TrialName", a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.NetworkConfig, b.ko.Spec.NetworkConfig) { + delta.Add("Spec.NetworkConfig", a.ko.Spec.NetworkConfig, b.ko.Spec.NetworkConfig) + } else if a.ko.Spec.NetworkConfig != nil && b.ko.Spec.NetworkConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption) { + delta.Add("Spec.NetworkConfig.EnableInterContainerTrafficEncryption", a.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption) + } else if a.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption != nil && b.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption != nil { + if *a.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption != *b.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption { + delta.Add("Spec.NetworkConfig.EnableInterContainerTrafficEncryption", a.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption, b.ko.Spec.NetworkConfig.EnableInterContainerTrafficEncryption) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.NetworkConfig.EnableNetworkIsolation) { + delta.Add("Spec.NetworkConfig.EnableNetworkIsolation", 
a.ko.Spec.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.NetworkConfig.EnableNetworkIsolation) + } else if a.ko.Spec.NetworkConfig.EnableNetworkIsolation != nil && b.ko.Spec.NetworkConfig.EnableNetworkIsolation != nil { + if *a.ko.Spec.NetworkConfig.EnableNetworkIsolation != *b.ko.Spec.NetworkConfig.EnableNetworkIsolation { + delta.Add("Spec.NetworkConfig.EnableNetworkIsolation", a.ko.Spec.NetworkConfig.EnableNetworkIsolation, b.ko.Spec.NetworkConfig.EnableNetworkIsolation) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.NetworkConfig.VPCConfig, b.ko.Spec.NetworkConfig.VPCConfig) { + delta.Add("Spec.NetworkConfig.VPCConfig", a.ko.Spec.NetworkConfig.VPCConfig, b.ko.Spec.NetworkConfig.VPCConfig) + } else if a.ko.Spec.NetworkConfig.VPCConfig != nil && b.ko.Spec.NetworkConfig.VPCConfig != nil { + + if !ackcompare.SliceStringPEqual(a.ko.Spec.NetworkConfig.VPCConfig.SecurityGroupIDs, b.ko.Spec.NetworkConfig.VPCConfig.SecurityGroupIDs) { + delta.Add("Spec.NetworkConfig.VPCConfig.SecurityGroupIDs", a.ko.Spec.NetworkConfig.VPCConfig.SecurityGroupIDs, b.ko.Spec.NetworkConfig.VPCConfig.SecurityGroupIDs) + } + + if !ackcompare.SliceStringPEqual(a.ko.Spec.NetworkConfig.VPCConfig.Subnets, b.ko.Spec.NetworkConfig.VPCConfig.Subnets) { + delta.Add("Spec.NetworkConfig.VPCConfig.Subnets", a.ko.Spec.NetworkConfig.VPCConfig.Subnets, b.ko.Spec.NetworkConfig.VPCConfig.Subnets) + } + } + } + + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingJobName, b.ko.Spec.ProcessingJobName) { + delta.Add("Spec.ProcessingJobName", a.ko.Spec.ProcessingJobName, b.ko.Spec.ProcessingJobName) + } else if a.ko.Spec.ProcessingJobName != nil && b.ko.Spec.ProcessingJobName != nil { + if *a.ko.Spec.ProcessingJobName != *b.ko.Spec.ProcessingJobName { + delta.Add("Spec.ProcessingJobName", a.ko.Spec.ProcessingJobName, b.ko.Spec.ProcessingJobName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingOutputConfig, b.ko.Spec.ProcessingOutputConfig) { + delta.Add("Spec.ProcessingOutputConfig", a.ko.Spec.ProcessingOutputConfig, b.ko.Spec.ProcessingOutputConfig) + } else if a.ko.Spec.ProcessingOutputConfig != nil && b.ko.Spec.ProcessingOutputConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingOutputConfig.KMSKeyID, b.ko.Spec.ProcessingOutputConfig.KMSKeyID) { + delta.Add("Spec.ProcessingOutputConfig.KMSKeyID", a.ko.Spec.ProcessingOutputConfig.KMSKeyID, b.ko.Spec.ProcessingOutputConfig.KMSKeyID) + } else if a.ko.Spec.ProcessingOutputConfig.KMSKeyID != nil && b.ko.Spec.ProcessingOutputConfig.KMSKeyID != nil { + if *a.ko.Spec.ProcessingOutputConfig.KMSKeyID != *b.ko.Spec.ProcessingOutputConfig.KMSKeyID { + delta.Add("Spec.ProcessingOutputConfig.KMSKeyID", a.ko.Spec.ProcessingOutputConfig.KMSKeyID, b.ko.Spec.ProcessingOutputConfig.KMSKeyID) + } + } + + } + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingResources, b.ko.Spec.ProcessingResources) { + delta.Add("Spec.ProcessingResources", a.ko.Spec.ProcessingResources, b.ko.Spec.ProcessingResources) + } else if a.ko.Spec.ProcessingResources != nil && b.ko.Spec.ProcessingResources != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingResources.ClusterConfig, b.ko.Spec.ProcessingResources.ClusterConfig) { + delta.Add("Spec.ProcessingResources.ClusterConfig", a.ko.Spec.ProcessingResources.ClusterConfig, b.ko.Spec.ProcessingResources.ClusterConfig) + } else if a.ko.Spec.ProcessingResources.ClusterConfig != nil && b.ko.Spec.ProcessingResources.ClusterConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount, 
b.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount) { + delta.Add("Spec.ProcessingResources.ClusterConfig.InstanceCount", a.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount, b.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount) + } else if a.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount != nil && b.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount != nil { + if *a.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount != *b.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount { + delta.Add("Spec.ProcessingResources.ClusterConfig.InstanceCount", a.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount, b.ko.Spec.ProcessingResources.ClusterConfig.InstanceCount) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingResources.ClusterConfig.InstanceType, b.ko.Spec.ProcessingResources.ClusterConfig.InstanceType) { + delta.Add("Spec.ProcessingResources.ClusterConfig.InstanceType", a.ko.Spec.ProcessingResources.ClusterConfig.InstanceType, b.ko.Spec.ProcessingResources.ClusterConfig.InstanceType) + } else if a.ko.Spec.ProcessingResources.ClusterConfig.InstanceType != nil && b.ko.Spec.ProcessingResources.ClusterConfig.InstanceType != nil { + if *a.ko.Spec.ProcessingResources.ClusterConfig.InstanceType != *b.ko.Spec.ProcessingResources.ClusterConfig.InstanceType { + delta.Add("Spec.ProcessingResources.ClusterConfig.InstanceType", a.ko.Spec.ProcessingResources.ClusterConfig.InstanceType, b.ko.Spec.ProcessingResources.ClusterConfig.InstanceType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID) { + delta.Add("Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID", a.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID) + } else if a.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID != nil && b.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID != nil { + if *a.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID != *b.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID { + delta.Add("Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID", a.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID, b.ko.Spec.ProcessingResources.ClusterConfig.VolumeKMSKeyID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB) { + delta.Add("Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB", a.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB) + } else if a.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB != nil && b.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB != nil { + if *a.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB != *b.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB { + delta.Add("Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB", a.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB, b.ko.Spec.ProcessingResources.ClusterConfig.VolumeSizeInGB) + } + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } else if a.ko.Spec.RoleARN != nil && b.ko.Spec.RoleARN != nil { + if *a.ko.Spec.RoleARN != *b.ko.Spec.RoleARN { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.StoppingCondition, 
b.ko.Spec.StoppingCondition) { + delta.Add("Spec.StoppingCondition", a.ko.Spec.StoppingCondition, b.ko.Spec.StoppingCondition) + } else if a.ko.Spec.StoppingCondition != nil && b.ko.Spec.StoppingCondition != nil { + if ackcompare.HasNilDifference(a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds) { + delta.Add("Spec.StoppingCondition.MaxRuntimeInSeconds", a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds) + } else if a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds != nil && b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds != nil { + if *a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds != *b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds { + delta.Add("Spec.StoppingCondition.MaxRuntimeInSeconds", a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds) + } + } + } + + return delta +} diff --git a/pkg/resource/processing_job/descriptor.go b/pkg/resource/processing_job/descriptor.go index 7b6252aa..4d3fba3f 100644 --- a/pkg/resource/processing_job/descriptor.go +++ b/pkg/resource/processing_job/descriptor.go @@ -18,8 +18,6 @@ package processing_job import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sapirt "k8s.io/apimachinery/pkg/runtime" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -65,37 +63,10 @@ func (d *resourceDescriptor) ResourceFromRuntimeObject( } } -// Equal returns true if the two supplied AWSResources have the same content. -// The underlying types of the two supplied AWSResources should be the same. In -// other words, the Equal() method should be called with the same concrete -// implementing AWSResource type -func (d *resourceDescriptor) Equal( - a acktypes.AWSResource, - b acktypes.AWSResource, -) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - return cmp.Equal(ac.ko, bc.ko, opts...) -} - -// Diff returns a Reporter which provides the difference between two supplied -// AWSResources. The underlying types of the two supplied AWSResources should -// be the same. In other words, the Diff() method should be called with the -// same concrete implementing AWSResource type -func (d *resourceDescriptor) Diff( - a acktypes.AWSResource, - b acktypes.AWSResource, -) *ackcompare.Reporter { - ac := a.(*resource) - bc := b.(*resource) - var diffReporter ackcompare.Reporter - opts := []cmp.Option{ - cmp.Reporter(&diffReporter), - cmp.AllowUnexported(svcapitypes.ProcessingJob{}), - } - cmp.Equal(ac.ko, bc.ko, opts...) - return &diffReporter +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. 
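The sketch below shows how a Delta produced by this method is expected to be consumed further along the reconcile path once sdkUpdate is implemented. It is a hedged illustration, not part of this change: the DifferentAt helper is assumed to exist on the runtime's Delta type, and the update action is hypothetical.

package main

import (
    "context"
    "fmt"

    ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare"
)

// updateProcessingJob is a hypothetical hand-written sdkUpdate body showing how
// the *ackcompare.Delta now passed through Update() could be consulted. The
// field path matches one recorded by the generated newResourceDelta above.
func updateProcessingJob(ctx context.Context, delta *ackcompare.Delta) error {
    if delta.DifferentAt("Spec.StoppingCondition") {
        // A real implementation would call the SageMaker API here.
        fmt.Println("stopping condition changed; would update the processing job")
    }
    return nil
}

func main() {
    delta := ackcompare.NewDelta()
    delta.Add("Spec.StoppingCondition", nil, nil) // simulate a recorded difference
    _ = updateProcessingJob(context.Background(), delta)
}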
+func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) } // UpdateCRStatus accepts an AWSResource object and changes the Status diff --git a/pkg/resource/processing_job/manager.go b/pkg/resource/processing_job/manager.go index b526d8f4..d0fa8ec3 100644 --- a/pkg/resource/processing_job/manager.go +++ b/pkg/resource/processing_job/manager.go @@ -125,7 +125,7 @@ func (rm *resourceManager) Update( ctx context.Context, resDesired acktypes.AWSResource, resLatest acktypes.AWSResource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (acktypes.AWSResource, error) { desired := rm.concreteResource(resDesired) latest := rm.concreteResource(resLatest) @@ -133,7 +133,7 @@ func (rm *resourceManager) Update( // Should never happen... if it does, it's buggy code. panic("resource manager's Update() method received resource with nil CR object") } - updated, err := rm.sdkUpdate(ctx, desired, latest, diffReporter) + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) if err != nil { return rm.onError(latest, err) } diff --git a/pkg/resource/processing_job/resource.go b/pkg/resource/processing_job/resource.go index adfbf6ca..baa4da7f 100644 --- a/pkg/resource/processing_job/resource.go +++ b/pkg/resource/processing_job/resource.go @@ -24,7 +24,7 @@ import ( svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1" ) -// resource implements the `aws-service-operator-k8s/pkg/types.AWSResource` +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` // interface type resource struct { // The Kubernetes-native CR representing the resource diff --git a/pkg/resource/processing_job/sdk.go b/pkg/resource/processing_job/sdk.go index e597481e..ad70df79 100644 --- a/pkg/resource/processing_job/sdk.go +++ b/pkg/resource/processing_job/sdk.go @@ -95,6 +95,8 @@ func (rm *resourceManager) sdkFind( f0.ImageURI = resp.AppSpecification.ImageUri } ko.Spec.AppSpecification = f0 + } else { + ko.Spec.AppSpecification = nil } if resp.Environment != nil { f3 := map[string]*string{} @@ -104,6 +106,8 @@ func (rm *resourceManager) sdkFind( f3[f3key] = &f3val } ko.Spec.Environment = f3 + } else { + ko.Spec.Environment = nil } if resp.ExperimentConfig != nil { f5 := &svcapitypes.ExperimentConfig{} @@ -117,9 +121,13 @@ func (rm *resourceManager) sdkFind( f5.TrialName = resp.ExperimentConfig.TrialName } ko.Spec.ExperimentConfig = f5 + } else { + ko.Spec.ExperimentConfig = nil } if resp.FailureReason != nil { ko.Status.FailureReason = resp.FailureReason + } else { + ko.Status.FailureReason = nil } if resp.NetworkConfig != nil { f9 := &svcapitypes.NetworkConfig{} @@ -152,6 +160,8 @@ func (rm *resourceManager) sdkFind( f9.VPCConfig = f9f2 } ko.Spec.NetworkConfig = f9 + } else { + ko.Spec.NetworkConfig = nil } if resp.ProcessingInputs != nil { f11 := []*svcapitypes.ProcessingInput{} @@ -260,6 +270,8 @@ func (rm *resourceManager) sdkFind( f11 = append(f11, f11elem) } ko.Spec.ProcessingInputs = f11 + } else { + ko.Spec.ProcessingInputs = nil } if ko.Status.ACKResourceMetadata == nil { ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} @@ -270,9 +282,13 @@ func (rm *resourceManager) sdkFind( } if resp.ProcessingJobName != nil { ko.Spec.ProcessingJobName = resp.ProcessingJobName + } else { + ko.Spec.ProcessingJobName = nil } if resp.ProcessingJobStatus != nil { ko.Status.ProcessingJobStatus = resp.ProcessingJobStatus + } else { + ko.Status.ProcessingJobStatus = nil } if 
resp.ProcessingOutputConfig != nil { f15 := &svcapitypes.ProcessingOutputConfig{} @@ -314,6 +330,8 @@ func (rm *resourceManager) sdkFind( f15.Outputs = f15f1 } ko.Spec.ProcessingOutputConfig = f15 + } else { + ko.Spec.ProcessingOutputConfig = nil } if resp.ProcessingResources != nil { f16 := &svcapitypes.ProcessingResources{} @@ -334,9 +352,13 @@ func (rm *resourceManager) sdkFind( f16.ClusterConfig = f16f0 } ko.Spec.ProcessingResources = f16 + } else { + ko.Spec.ProcessingResources = nil } if resp.RoleArn != nil { ko.Spec.RoleARN = resp.RoleArn + } else { + ko.Spec.RoleARN = nil } if resp.StoppingCondition != nil { f19 := &svcapitypes.ProcessingStoppingCondition{} @@ -344,6 +366,8 @@ func (rm *resourceManager) sdkFind( f19.MaxRuntimeInSeconds = resp.StoppingCondition.MaxRuntimeInSeconds } ko.Spec.StoppingCondition = f19 + } else { + ko.Spec.StoppingCondition = nil } rm.setStatusDefaults(ko) @@ -677,20 +701,6 @@ func (rm *resourceManager) newCreateRequestPayload( } res.SetStoppingCondition(f9) } - if r.ko.Spec.Tags != nil { - f10 := []*svcsdk.Tag{} - for _, f10iter := range r.ko.Spec.Tags { - f10elem := &svcsdk.Tag{} - if f10iter.Key != nil { - f10elem.SetKey(*f10iter.Key) - } - if f10iter.Value != nil { - f10elem.SetValue(*f10iter.Value) - } - f10 = append(f10, f10elem) - } - res.SetTags(f10) - } return res, nil } @@ -701,7 +711,7 @@ func (rm *resourceManager) sdkUpdate( ctx context.Context, desired *resource, latest *resource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (*resource, error) { // TODO(jaypipes): Figure this out... return nil, ackerr.NotImplemented diff --git a/pkg/resource/training_job/delta.go b/pkg/resource/training_job/delta.go new file mode 100644 index 00000000..a79528c5 --- /dev/null +++ b/pkg/resource/training_job/delta.go @@ -0,0 +1,307 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package training_job + +import ( + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.AlgorithmSpecification, b.ko.Spec.AlgorithmSpecification) { + delta.Add("Spec.AlgorithmSpecification", a.ko.Spec.AlgorithmSpecification, b.ko.Spec.AlgorithmSpecification) + } else if a.ko.Spec.AlgorithmSpecification != nil && b.ko.Spec.AlgorithmSpecification != nil { + if ackcompare.HasNilDifference(a.ko.Spec.AlgorithmSpecification.AlgorithmName, b.ko.Spec.AlgorithmSpecification.AlgorithmName) { + delta.Add("Spec.AlgorithmSpecification.AlgorithmName", a.ko.Spec.AlgorithmSpecification.AlgorithmName, b.ko.Spec.AlgorithmSpecification.AlgorithmName) + } else if a.ko.Spec.AlgorithmSpecification.AlgorithmName != nil && b.ko.Spec.AlgorithmSpecification.AlgorithmName != nil { + if *a.ko.Spec.AlgorithmSpecification.AlgorithmName != *b.ko.Spec.AlgorithmSpecification.AlgorithmName { + delta.Add("Spec.AlgorithmSpecification.AlgorithmName", a.ko.Spec.AlgorithmSpecification.AlgorithmName, b.ko.Spec.AlgorithmSpecification.AlgorithmName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries, b.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries) { + delta.Add("Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries", a.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries, b.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries) + } else if a.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries != nil && b.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries != nil { + if *a.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries != *b.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries { + delta.Add("Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries", a.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries, b.ko.Spec.AlgorithmSpecification.EnableSageMakerMetricsTimeSeries) + } + } + + if ackcompare.HasNilDifference(a.ko.Spec.AlgorithmSpecification.TrainingImage, b.ko.Spec.AlgorithmSpecification.TrainingImage) { + delta.Add("Spec.AlgorithmSpecification.TrainingImage", a.ko.Spec.AlgorithmSpecification.TrainingImage, b.ko.Spec.AlgorithmSpecification.TrainingImage) + } else if a.ko.Spec.AlgorithmSpecification.TrainingImage != nil && b.ko.Spec.AlgorithmSpecification.TrainingImage != nil { + if *a.ko.Spec.AlgorithmSpecification.TrainingImage != *b.ko.Spec.AlgorithmSpecification.TrainingImage { + delta.Add("Spec.AlgorithmSpecification.TrainingImage", a.ko.Spec.AlgorithmSpecification.TrainingImage, b.ko.Spec.AlgorithmSpecification.TrainingImage) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.AlgorithmSpecification.TrainingInputMode) { + delta.Add("Spec.AlgorithmSpecification.TrainingInputMode", a.ko.Spec.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.AlgorithmSpecification.TrainingInputMode) + } else if a.ko.Spec.AlgorithmSpecification.TrainingInputMode != nil && b.ko.Spec.AlgorithmSpecification.TrainingInputMode != nil { + if *a.ko.Spec.AlgorithmSpecification.TrainingInputMode != *b.ko.Spec.AlgorithmSpecification.TrainingInputMode { + 
delta.Add("Spec.AlgorithmSpecification.TrainingInputMode", a.ko.Spec.AlgorithmSpecification.TrainingInputMode, b.ko.Spec.AlgorithmSpecification.TrainingInputMode) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CheckpointConfig, b.ko.Spec.CheckpointConfig) { + delta.Add("Spec.CheckpointConfig", a.ko.Spec.CheckpointConfig, b.ko.Spec.CheckpointConfig) + } else if a.ko.Spec.CheckpointConfig != nil && b.ko.Spec.CheckpointConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.CheckpointConfig.LocalPath, b.ko.Spec.CheckpointConfig.LocalPath) { + delta.Add("Spec.CheckpointConfig.LocalPath", a.ko.Spec.CheckpointConfig.LocalPath, b.ko.Spec.CheckpointConfig.LocalPath) + } else if a.ko.Spec.CheckpointConfig.LocalPath != nil && b.ko.Spec.CheckpointConfig.LocalPath != nil { + if *a.ko.Spec.CheckpointConfig.LocalPath != *b.ko.Spec.CheckpointConfig.LocalPath { + delta.Add("Spec.CheckpointConfig.LocalPath", a.ko.Spec.CheckpointConfig.LocalPath, b.ko.Spec.CheckpointConfig.LocalPath) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.CheckpointConfig.S3URI, b.ko.Spec.CheckpointConfig.S3URI) { + delta.Add("Spec.CheckpointConfig.S3URI", a.ko.Spec.CheckpointConfig.S3URI, b.ko.Spec.CheckpointConfig.S3URI) + } else if a.ko.Spec.CheckpointConfig.S3URI != nil && b.ko.Spec.CheckpointConfig.S3URI != nil { + if *a.ko.Spec.CheckpointConfig.S3URI != *b.ko.Spec.CheckpointConfig.S3URI { + delta.Add("Spec.CheckpointConfig.S3URI", a.ko.Spec.CheckpointConfig.S3URI, b.ko.Spec.CheckpointConfig.S3URI) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DebugHookConfig, b.ko.Spec.DebugHookConfig) { + delta.Add("Spec.DebugHookConfig", a.ko.Spec.DebugHookConfig, b.ko.Spec.DebugHookConfig) + } else if a.ko.Spec.DebugHookConfig != nil && b.ko.Spec.DebugHookConfig != nil { + + if ackcompare.HasNilDifference(a.ko.Spec.DebugHookConfig.HookParameters, b.ko.Spec.DebugHookConfig.HookParameters) { + delta.Add("Spec.DebugHookConfig.HookParameters", a.ko.Spec.DebugHookConfig.HookParameters, b.ko.Spec.DebugHookConfig.HookParameters) + } else if a.ko.Spec.DebugHookConfig.HookParameters != nil && b.ko.Spec.DebugHookConfig.HookParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.DebugHookConfig.HookParameters, b.ko.Spec.DebugHookConfig.HookParameters) { + delta.Add("Spec.DebugHookConfig.HookParameters", a.ko.Spec.DebugHookConfig.HookParameters, b.ko.Spec.DebugHookConfig.HookParameters) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DebugHookConfig.LocalPath, b.ko.Spec.DebugHookConfig.LocalPath) { + delta.Add("Spec.DebugHookConfig.LocalPath", a.ko.Spec.DebugHookConfig.LocalPath, b.ko.Spec.DebugHookConfig.LocalPath) + } else if a.ko.Spec.DebugHookConfig.LocalPath != nil && b.ko.Spec.DebugHookConfig.LocalPath != nil { + if *a.ko.Spec.DebugHookConfig.LocalPath != *b.ko.Spec.DebugHookConfig.LocalPath { + delta.Add("Spec.DebugHookConfig.LocalPath", a.ko.Spec.DebugHookConfig.LocalPath, b.ko.Spec.DebugHookConfig.LocalPath) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DebugHookConfig.S3OutputPath, b.ko.Spec.DebugHookConfig.S3OutputPath) { + delta.Add("Spec.DebugHookConfig.S3OutputPath", a.ko.Spec.DebugHookConfig.S3OutputPath, b.ko.Spec.DebugHookConfig.S3OutputPath) + } else if a.ko.Spec.DebugHookConfig.S3OutputPath != nil && b.ko.Spec.DebugHookConfig.S3OutputPath != nil { + if *a.ko.Spec.DebugHookConfig.S3OutputPath != *b.ko.Spec.DebugHookConfig.S3OutputPath { + delta.Add("Spec.DebugHookConfig.S3OutputPath", a.ko.Spec.DebugHookConfig.S3OutputPath, b.ko.Spec.DebugHookConfig.S3OutputPath) + } + } + } + + if 
ackcompare.HasNilDifference(a.ko.Spec.EnableInterContainerTrafficEncryption, b.ko.Spec.EnableInterContainerTrafficEncryption) { + delta.Add("Spec.EnableInterContainerTrafficEncryption", a.ko.Spec.EnableInterContainerTrafficEncryption, b.ko.Spec.EnableInterContainerTrafficEncryption) + } else if a.ko.Spec.EnableInterContainerTrafficEncryption != nil && b.ko.Spec.EnableInterContainerTrafficEncryption != nil { + if *a.ko.Spec.EnableInterContainerTrafficEncryption != *b.ko.Spec.EnableInterContainerTrafficEncryption { + delta.Add("Spec.EnableInterContainerTrafficEncryption", a.ko.Spec.EnableInterContainerTrafficEncryption, b.ko.Spec.EnableInterContainerTrafficEncryption) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.EnableManagedSpotTraining, b.ko.Spec.EnableManagedSpotTraining) { + delta.Add("Spec.EnableManagedSpotTraining", a.ko.Spec.EnableManagedSpotTraining, b.ko.Spec.EnableManagedSpotTraining) + } else if a.ko.Spec.EnableManagedSpotTraining != nil && b.ko.Spec.EnableManagedSpotTraining != nil { + if *a.ko.Spec.EnableManagedSpotTraining != *b.ko.Spec.EnableManagedSpotTraining { + delta.Add("Spec.EnableManagedSpotTraining", a.ko.Spec.EnableManagedSpotTraining, b.ko.Spec.EnableManagedSpotTraining) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.EnableNetworkIsolation, b.ko.Spec.EnableNetworkIsolation) { + delta.Add("Spec.EnableNetworkIsolation", a.ko.Spec.EnableNetworkIsolation, b.ko.Spec.EnableNetworkIsolation) + } else if a.ko.Spec.EnableNetworkIsolation != nil && b.ko.Spec.EnableNetworkIsolation != nil { + if *a.ko.Spec.EnableNetworkIsolation != *b.ko.Spec.EnableNetworkIsolation { + delta.Add("Spec.EnableNetworkIsolation", a.ko.Spec.EnableNetworkIsolation, b.ko.Spec.EnableNetworkIsolation) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig, b.ko.Spec.ExperimentConfig) { + delta.Add("Spec.ExperimentConfig", a.ko.Spec.ExperimentConfig, b.ko.Spec.ExperimentConfig) + } else if a.ko.Spec.ExperimentConfig != nil && b.ko.Spec.ExperimentConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) { + delta.Add("Spec.ExperimentConfig.ExperimentName", a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) + } else if a.ko.Spec.ExperimentConfig.ExperimentName != nil && b.ko.Spec.ExperimentConfig.ExperimentName != nil { + if *a.ko.Spec.ExperimentConfig.ExperimentName != *b.ko.Spec.ExperimentConfig.ExperimentName { + delta.Add("Spec.ExperimentConfig.ExperimentName", a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) { + delta.Add("Spec.ExperimentConfig.TrialComponentDisplayName", a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) + } else if a.ko.Spec.ExperimentConfig.TrialComponentDisplayName != nil && b.ko.Spec.ExperimentConfig.TrialComponentDisplayName != nil { + if *a.ko.Spec.ExperimentConfig.TrialComponentDisplayName != *b.ko.Spec.ExperimentConfig.TrialComponentDisplayName { + delta.Add("Spec.ExperimentConfig.TrialComponentDisplayName", a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) { + delta.Add("Spec.ExperimentConfig.TrialName", 
a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) + } else if a.ko.Spec.ExperimentConfig.TrialName != nil && b.ko.Spec.ExperimentConfig.TrialName != nil { + if *a.ko.Spec.ExperimentConfig.TrialName != *b.ko.Spec.ExperimentConfig.TrialName { + delta.Add("Spec.ExperimentConfig.TrialName", a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.HyperParameters, b.ko.Spec.HyperParameters) { + delta.Add("Spec.HyperParameters", a.ko.Spec.HyperParameters, b.ko.Spec.HyperParameters) + } else if a.ko.Spec.HyperParameters != nil && b.ko.Spec.HyperParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.HyperParameters, b.ko.Spec.HyperParameters) { + delta.Add("Spec.HyperParameters", a.ko.Spec.HyperParameters, b.ko.Spec.HyperParameters) + } + } + + if ackcompare.HasNilDifference(a.ko.Spec.OutputDataConfig, b.ko.Spec.OutputDataConfig) { + delta.Add("Spec.OutputDataConfig", a.ko.Spec.OutputDataConfig, b.ko.Spec.OutputDataConfig) + } else if a.ko.Spec.OutputDataConfig != nil && b.ko.Spec.OutputDataConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.OutputDataConfig.KMSKeyID, b.ko.Spec.OutputDataConfig.KMSKeyID) { + delta.Add("Spec.OutputDataConfig.KMSKeyID", a.ko.Spec.OutputDataConfig.KMSKeyID, b.ko.Spec.OutputDataConfig.KMSKeyID) + } else if a.ko.Spec.OutputDataConfig.KMSKeyID != nil && b.ko.Spec.OutputDataConfig.KMSKeyID != nil { + if *a.ko.Spec.OutputDataConfig.KMSKeyID != *b.ko.Spec.OutputDataConfig.KMSKeyID { + delta.Add("Spec.OutputDataConfig.KMSKeyID", a.ko.Spec.OutputDataConfig.KMSKeyID, b.ko.Spec.OutputDataConfig.KMSKeyID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.OutputDataConfig.S3OutputPath, b.ko.Spec.OutputDataConfig.S3OutputPath) { + delta.Add("Spec.OutputDataConfig.S3OutputPath", a.ko.Spec.OutputDataConfig.S3OutputPath, b.ko.Spec.OutputDataConfig.S3OutputPath) + } else if a.ko.Spec.OutputDataConfig.S3OutputPath != nil && b.ko.Spec.OutputDataConfig.S3OutputPath != nil { + if *a.ko.Spec.OutputDataConfig.S3OutputPath != *b.ko.Spec.OutputDataConfig.S3OutputPath { + delta.Add("Spec.OutputDataConfig.S3OutputPath", a.ko.Spec.OutputDataConfig.S3OutputPath, b.ko.Spec.OutputDataConfig.S3OutputPath) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ProfilerConfig, b.ko.Spec.ProfilerConfig) { + delta.Add("Spec.ProfilerConfig", a.ko.Spec.ProfilerConfig, b.ko.Spec.ProfilerConfig) + } else if a.ko.Spec.ProfilerConfig != nil && b.ko.Spec.ProfilerConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds, b.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds) { + delta.Add("Spec.ProfilerConfig.ProfilingIntervalInMilliseconds", a.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds, b.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds) + } else if a.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds != nil && b.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds != nil { + if *a.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds != *b.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds { + delta.Add("Spec.ProfilerConfig.ProfilingIntervalInMilliseconds", a.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds, b.ko.Spec.ProfilerConfig.ProfilingIntervalInMilliseconds) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ProfilerConfig.ProfilingParameters, b.ko.Spec.ProfilerConfig.ProfilingParameters) { + delta.Add("Spec.ProfilerConfig.ProfilingParameters", a.ko.Spec.ProfilerConfig.ProfilingParameters, 
b.ko.Spec.ProfilerConfig.ProfilingParameters) + } else if a.ko.Spec.ProfilerConfig.ProfilingParameters != nil && b.ko.Spec.ProfilerConfig.ProfilingParameters != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.ProfilerConfig.ProfilingParameters, b.ko.Spec.ProfilerConfig.ProfilingParameters) { + delta.Add("Spec.ProfilerConfig.ProfilingParameters", a.ko.Spec.ProfilerConfig.ProfilingParameters, b.ko.Spec.ProfilerConfig.ProfilingParameters) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ProfilerConfig.S3OutputPath, b.ko.Spec.ProfilerConfig.S3OutputPath) { + delta.Add("Spec.ProfilerConfig.S3OutputPath", a.ko.Spec.ProfilerConfig.S3OutputPath, b.ko.Spec.ProfilerConfig.S3OutputPath) + } else if a.ko.Spec.ProfilerConfig.S3OutputPath != nil && b.ko.Spec.ProfilerConfig.S3OutputPath != nil { + if *a.ko.Spec.ProfilerConfig.S3OutputPath != *b.ko.Spec.ProfilerConfig.S3OutputPath { + delta.Add("Spec.ProfilerConfig.S3OutputPath", a.ko.Spec.ProfilerConfig.S3OutputPath, b.ko.Spec.ProfilerConfig.S3OutputPath) + } + } + } + + if ackcompare.HasNilDifference(a.ko.Spec.ResourceConfig, b.ko.Spec.ResourceConfig) { + delta.Add("Spec.ResourceConfig", a.ko.Spec.ResourceConfig, b.ko.Spec.ResourceConfig) + } else if a.ko.Spec.ResourceConfig != nil && b.ko.Spec.ResourceConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ResourceConfig.InstanceCount, b.ko.Spec.ResourceConfig.InstanceCount) { + delta.Add("Spec.ResourceConfig.InstanceCount", a.ko.Spec.ResourceConfig.InstanceCount, b.ko.Spec.ResourceConfig.InstanceCount) + } else if a.ko.Spec.ResourceConfig.InstanceCount != nil && b.ko.Spec.ResourceConfig.InstanceCount != nil { + if *a.ko.Spec.ResourceConfig.InstanceCount != *b.ko.Spec.ResourceConfig.InstanceCount { + delta.Add("Spec.ResourceConfig.InstanceCount", a.ko.Spec.ResourceConfig.InstanceCount, b.ko.Spec.ResourceConfig.InstanceCount) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ResourceConfig.InstanceType, b.ko.Spec.ResourceConfig.InstanceType) { + delta.Add("Spec.ResourceConfig.InstanceType", a.ko.Spec.ResourceConfig.InstanceType, b.ko.Spec.ResourceConfig.InstanceType) + } else if a.ko.Spec.ResourceConfig.InstanceType != nil && b.ko.Spec.ResourceConfig.InstanceType != nil { + if *a.ko.Spec.ResourceConfig.InstanceType != *b.ko.Spec.ResourceConfig.InstanceType { + delta.Add("Spec.ResourceConfig.InstanceType", a.ko.Spec.ResourceConfig.InstanceType, b.ko.Spec.ResourceConfig.InstanceType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.ResourceConfig.VolumeKMSKeyID) { + delta.Add("Spec.ResourceConfig.VolumeKMSKeyID", a.ko.Spec.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.ResourceConfig.VolumeKMSKeyID) + } else if a.ko.Spec.ResourceConfig.VolumeKMSKeyID != nil && b.ko.Spec.ResourceConfig.VolumeKMSKeyID != nil { + if *a.ko.Spec.ResourceConfig.VolumeKMSKeyID != *b.ko.Spec.ResourceConfig.VolumeKMSKeyID { + delta.Add("Spec.ResourceConfig.VolumeKMSKeyID", a.ko.Spec.ResourceConfig.VolumeKMSKeyID, b.ko.Spec.ResourceConfig.VolumeKMSKeyID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ResourceConfig.VolumeSizeInGB, b.ko.Spec.ResourceConfig.VolumeSizeInGB) { + delta.Add("Spec.ResourceConfig.VolumeSizeInGB", a.ko.Spec.ResourceConfig.VolumeSizeInGB, b.ko.Spec.ResourceConfig.VolumeSizeInGB) + } else if a.ko.Spec.ResourceConfig.VolumeSizeInGB != nil && b.ko.Spec.ResourceConfig.VolumeSizeInGB != nil { + if *a.ko.Spec.ResourceConfig.VolumeSizeInGB != *b.ko.Spec.ResourceConfig.VolumeSizeInGB { + delta.Add("Spec.ResourceConfig.VolumeSizeInGB", 
a.ko.Spec.ResourceConfig.VolumeSizeInGB, b.ko.Spec.ResourceConfig.VolumeSizeInGB) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } else if a.ko.Spec.RoleARN != nil && b.ko.Spec.RoleARN != nil { + if *a.ko.Spec.RoleARN != *b.ko.Spec.RoleARN { + delta.Add("Spec.RoleARN", a.ko.Spec.RoleARN, b.ko.Spec.RoleARN) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.StoppingCondition, b.ko.Spec.StoppingCondition) { + delta.Add("Spec.StoppingCondition", a.ko.Spec.StoppingCondition, b.ko.Spec.StoppingCondition) + } else if a.ko.Spec.StoppingCondition != nil && b.ko.Spec.StoppingCondition != nil { + if ackcompare.HasNilDifference(a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds) { + delta.Add("Spec.StoppingCondition.MaxRuntimeInSeconds", a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds) + } else if a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds != nil && b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds != nil { + if *a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds != *b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds { + delta.Add("Spec.StoppingCondition.MaxRuntimeInSeconds", a.ko.Spec.StoppingCondition.MaxRuntimeInSeconds, b.ko.Spec.StoppingCondition.MaxRuntimeInSeconds) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds) { + delta.Add("Spec.StoppingCondition.MaxWaitTimeInSeconds", a.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds) + } else if a.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds != nil && b.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds != nil { + if *a.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds != *b.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds { + delta.Add("Spec.StoppingCondition.MaxWaitTimeInSeconds", a.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds, b.ko.Spec.StoppingCondition.MaxWaitTimeInSeconds) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TensorBoardOutputConfig, b.ko.Spec.TensorBoardOutputConfig) { + delta.Add("Spec.TensorBoardOutputConfig", a.ko.Spec.TensorBoardOutputConfig, b.ko.Spec.TensorBoardOutputConfig) + } else if a.ko.Spec.TensorBoardOutputConfig != nil && b.ko.Spec.TensorBoardOutputConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TensorBoardOutputConfig.LocalPath, b.ko.Spec.TensorBoardOutputConfig.LocalPath) { + delta.Add("Spec.TensorBoardOutputConfig.LocalPath", a.ko.Spec.TensorBoardOutputConfig.LocalPath, b.ko.Spec.TensorBoardOutputConfig.LocalPath) + } else if a.ko.Spec.TensorBoardOutputConfig.LocalPath != nil && b.ko.Spec.TensorBoardOutputConfig.LocalPath != nil { + if *a.ko.Spec.TensorBoardOutputConfig.LocalPath != *b.ko.Spec.TensorBoardOutputConfig.LocalPath { + delta.Add("Spec.TensorBoardOutputConfig.LocalPath", a.ko.Spec.TensorBoardOutputConfig.LocalPath, b.ko.Spec.TensorBoardOutputConfig.LocalPath) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TensorBoardOutputConfig.S3OutputPath, b.ko.Spec.TensorBoardOutputConfig.S3OutputPath) { + delta.Add("Spec.TensorBoardOutputConfig.S3OutputPath", a.ko.Spec.TensorBoardOutputConfig.S3OutputPath, b.ko.Spec.TensorBoardOutputConfig.S3OutputPath) + } else if a.ko.Spec.TensorBoardOutputConfig.S3OutputPath != nil && b.ko.Spec.TensorBoardOutputConfig.S3OutputPath != nil { + if *a.ko.Spec.TensorBoardOutputConfig.S3OutputPath != 
*b.ko.Spec.TensorBoardOutputConfig.S3OutputPath { + delta.Add("Spec.TensorBoardOutputConfig.S3OutputPath", a.ko.Spec.TensorBoardOutputConfig.S3OutputPath, b.ko.Spec.TensorBoardOutputConfig.S3OutputPath) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TrainingJobName, b.ko.Spec.TrainingJobName) { + delta.Add("Spec.TrainingJobName", a.ko.Spec.TrainingJobName, b.ko.Spec.TrainingJobName) + } else if a.ko.Spec.TrainingJobName != nil && b.ko.Spec.TrainingJobName != nil { + if *a.ko.Spec.TrainingJobName != *b.ko.Spec.TrainingJobName { + delta.Add("Spec.TrainingJobName", a.ko.Spec.TrainingJobName, b.ko.Spec.TrainingJobName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.VPCConfig, b.ko.Spec.VPCConfig) { + delta.Add("Spec.VPCConfig", a.ko.Spec.VPCConfig, b.ko.Spec.VPCConfig) + } else if a.ko.Spec.VPCConfig != nil && b.ko.Spec.VPCConfig != nil { + + if !ackcompare.SliceStringPEqual(a.ko.Spec.VPCConfig.SecurityGroupIDs, b.ko.Spec.VPCConfig.SecurityGroupIDs) { + delta.Add("Spec.VPCConfig.SecurityGroupIDs", a.ko.Spec.VPCConfig.SecurityGroupIDs, b.ko.Spec.VPCConfig.SecurityGroupIDs) + } + + if !ackcompare.SliceStringPEqual(a.ko.Spec.VPCConfig.Subnets, b.ko.Spec.VPCConfig.Subnets) { + delta.Add("Spec.VPCConfig.Subnets", a.ko.Spec.VPCConfig.Subnets, b.ko.Spec.VPCConfig.Subnets) + } + } + + return delta +} diff --git a/pkg/resource/training_job/descriptor.go b/pkg/resource/training_job/descriptor.go index 794cb286..bcfdf2ae 100644 --- a/pkg/resource/training_job/descriptor.go +++ b/pkg/resource/training_job/descriptor.go @@ -18,8 +18,6 @@ package training_job import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sapirt "k8s.io/apimachinery/pkg/runtime" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -65,37 +63,10 @@ func (d *resourceDescriptor) ResourceFromRuntimeObject( } } -// Equal returns true if the two supplied AWSResources have the same content. -// The underlying types of the two supplied AWSResources should be the same. In -// other words, the Equal() method should be called with the same concrete -// implementing AWSResource type -func (d *resourceDescriptor) Equal( - a acktypes.AWSResource, - b acktypes.AWSResource, -) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - return cmp.Equal(ac.ko, bc.ko, opts...) -} - -// Diff returns a Reporter which provides the difference between two supplied -// AWSResources. The underlying types of the two supplied AWSResources should -// be the same. In other words, the Diff() method should be called with the -// same concrete implementing AWSResource type -func (d *resourceDescriptor) Diff( - a acktypes.AWSResource, - b acktypes.AWSResource, -) *ackcompare.Reporter { - ac := a.(*resource) - bc := b.(*resource) - var diffReporter ackcompare.Reporter - opts := []cmp.Option{ - cmp.Reporter(&diffReporter), - cmp.AllowUnexported(svcapitypes.TrainingJob{}), - } - cmp.Equal(ac.ko, bc.ko, opts...) - return &diffReporter +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. 
+func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) } // UpdateCRStatus accepts an AWSResource object and changes the Status diff --git a/pkg/resource/training_job/manager.go b/pkg/resource/training_job/manager.go index 0aa9370c..64e99d6a 100644 --- a/pkg/resource/training_job/manager.go +++ b/pkg/resource/training_job/manager.go @@ -125,7 +125,7 @@ func (rm *resourceManager) Update( ctx context.Context, resDesired acktypes.AWSResource, resLatest acktypes.AWSResource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (acktypes.AWSResource, error) { desired := rm.concreteResource(resDesired) latest := rm.concreteResource(resLatest) @@ -133,7 +133,7 @@ func (rm *resourceManager) Update( // Should never happen... if it does, it's buggy code. panic("resource manager's Update() method received resource with nil CR object") } - updated, err := rm.sdkUpdate(ctx, desired, latest, diffReporter) + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) if err != nil { return rm.onError(latest, err) } diff --git a/pkg/resource/training_job/resource.go b/pkg/resource/training_job/resource.go index b647eb93..156fd172 100644 --- a/pkg/resource/training_job/resource.go +++ b/pkg/resource/training_job/resource.go @@ -24,7 +24,7 @@ import ( svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1" ) -// resource implements the `aws-service-operator-k8s/pkg/types.AWSResource` +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` // interface type resource struct { // The Kubernetes-native CR representing the resource diff --git a/pkg/resource/training_job/sdk.go b/pkg/resource/training_job/sdk.go index 746c12fc..e3c746bd 100644 --- a/pkg/resource/training_job/sdk.go +++ b/pkg/resource/training_job/sdk.go @@ -100,6 +100,8 @@ func (rm *resourceManager) sdkFind( f0.TrainingInputMode = resp.AlgorithmSpecification.TrainingInputMode } ko.Spec.AlgorithmSpecification = f0 + } else { + ko.Spec.AlgorithmSpecification = nil } if resp.CheckpointConfig != nil { f3 := &svcapitypes.CheckpointConfig{} @@ -110,6 +112,8 @@ func (rm *resourceManager) sdkFind( f3.S3URI = resp.CheckpointConfig.S3Uri } ko.Spec.CheckpointConfig = f3 + } else { + ko.Spec.CheckpointConfig = nil } if resp.DebugHookConfig != nil { f5 := &svcapitypes.DebugHookConfig{} @@ -149,6 +153,8 @@ func (rm *resourceManager) sdkFind( f5.S3OutputPath = resp.DebugHookConfig.S3OutputPath } ko.Spec.DebugHookConfig = f5 + } else { + ko.Spec.DebugHookConfig = nil } if resp.DebugRuleConfigurations != nil { f6 := []*svcapitypes.DebugRuleConfiguration{} @@ -184,15 +190,23 @@ func (rm *resourceManager) sdkFind( f6 = append(f6, f6elem) } ko.Spec.DebugRuleConfigurations = f6 + } else { + ko.Spec.DebugRuleConfigurations = nil } if resp.EnableInterContainerTrafficEncryption != nil { ko.Spec.EnableInterContainerTrafficEncryption = resp.EnableInterContainerTrafficEncryption + } else { + ko.Spec.EnableInterContainerTrafficEncryption = nil } if resp.EnableManagedSpotTraining != nil { ko.Spec.EnableManagedSpotTraining = resp.EnableManagedSpotTraining + } else { + ko.Spec.EnableManagedSpotTraining = nil } if resp.EnableNetworkIsolation != nil { ko.Spec.EnableNetworkIsolation = resp.EnableNetworkIsolation + } else { + ko.Spec.EnableNetworkIsolation = nil } if resp.ExperimentConfig != nil { f11 := &svcapitypes.ExperimentConfig{} @@ -206,9 +220,13 @@ func (rm *resourceManager) sdkFind( f11.TrialName = resp.ExperimentConfig.TrialName } 
ko.Spec.ExperimentConfig = f11 + } else { + ko.Spec.ExperimentConfig = nil } if resp.FailureReason != nil { ko.Status.FailureReason = resp.FailureReason + } else { + ko.Status.FailureReason = nil } if resp.HyperParameters != nil { f14 := map[string]*string{} @@ -218,6 +236,8 @@ func (rm *resourceManager) sdkFind( f14[f14key] = &f14val } ko.Spec.HyperParameters = f14 + } else { + ko.Spec.HyperParameters = nil } if resp.InputDataConfig != nil { f15 := []*svcapitypes.Channel{} @@ -290,6 +310,8 @@ func (rm *resourceManager) sdkFind( f15 = append(f15, f15elem) } ko.Spec.InputDataConfig = f15 + } else { + ko.Spec.InputDataConfig = nil } if resp.OutputDataConfig != nil { f19 := &svcapitypes.OutputDataConfig{} @@ -300,6 +322,8 @@ func (rm *resourceManager) sdkFind( f19.S3OutputPath = resp.OutputDataConfig.S3OutputPath } ko.Spec.OutputDataConfig = f19 + } else { + ko.Spec.OutputDataConfig = nil } if resp.ProfilerConfig != nil { f20 := &svcapitypes.ProfilerConfig{} @@ -319,6 +343,8 @@ func (rm *resourceManager) sdkFind( f20.S3OutputPath = resp.ProfilerConfig.S3OutputPath } ko.Spec.ProfilerConfig = f20 + } else { + ko.Spec.ProfilerConfig = nil } if resp.ProfilerRuleConfigurations != nil { f21 := []*svcapitypes.ProfilerRuleConfiguration{} @@ -354,6 +380,8 @@ func (rm *resourceManager) sdkFind( f21 = append(f21, f21elem) } ko.Spec.ProfilerRuleConfigurations = f21 + } else { + ko.Spec.ProfilerRuleConfigurations = nil } if resp.ResourceConfig != nil { f24 := &svcapitypes.ResourceConfig{} @@ -370,12 +398,18 @@ func (rm *resourceManager) sdkFind( f24.VolumeSizeInGB = resp.ResourceConfig.VolumeSizeInGB } ko.Spec.ResourceConfig = f24 + } else { + ko.Spec.ResourceConfig = nil } if resp.RoleArn != nil { ko.Spec.RoleARN = resp.RoleArn + } else { + ko.Spec.RoleARN = nil } if resp.SecondaryStatus != nil { ko.Status.SecondaryStatus = resp.SecondaryStatus + } else { + ko.Status.SecondaryStatus = nil } if resp.StoppingCondition != nil { f28 := &svcapitypes.StoppingCondition{} @@ -386,6 +420,8 @@ func (rm *resourceManager) sdkFind( f28.MaxWaitTimeInSeconds = resp.StoppingCondition.MaxWaitTimeInSeconds } ko.Spec.StoppingCondition = f28 + } else { + ko.Spec.StoppingCondition = nil } if resp.TensorBoardOutputConfig != nil { f29 := &svcapitypes.TensorBoardOutputConfig{} @@ -396,6 +432,8 @@ func (rm *resourceManager) sdkFind( f29.S3OutputPath = resp.TensorBoardOutputConfig.S3OutputPath } ko.Spec.TensorBoardOutputConfig = f29 + } else { + ko.Spec.TensorBoardOutputConfig = nil } if ko.Status.ACKResourceMetadata == nil { ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} @@ -406,9 +444,13 @@ func (rm *resourceManager) sdkFind( } if resp.TrainingJobName != nil { ko.Spec.TrainingJobName = resp.TrainingJobName + } else { + ko.Spec.TrainingJobName = nil } if resp.TrainingJobStatus != nil { ko.Status.TrainingJobStatus = resp.TrainingJobStatus + } else { + ko.Status.TrainingJobStatus = nil } if resp.VpcConfig != nil { f37 := &svcapitypes.VPCConfig{} @@ -431,6 +473,8 @@ func (rm *resourceManager) sdkFind( f37.Subnets = f37f1 } ko.Spec.VPCConfig = f37 + } else { + ko.Spec.VPCConfig = nil } rm.setStatusDefaults(ko) @@ -813,54 +857,40 @@ func (rm *resourceManager) newCreateRequestPayload( } res.SetStoppingCondition(f15) } - if r.ko.Spec.Tags != nil { - f16 := []*svcsdk.Tag{} - for _, f16iter := range r.ko.Spec.Tags { - f16elem := &svcsdk.Tag{} - if f16iter.Key != nil { - f16elem.SetKey(*f16iter.Key) - } - if f16iter.Value != nil { - f16elem.SetValue(*f16iter.Value) - } - f16 = append(f16, f16elem) - } - res.SetTags(f16) - 
} if r.ko.Spec.TensorBoardOutputConfig != nil { - f17 := &svcsdk.TensorBoardOutputConfig{} + f16 := &svcsdk.TensorBoardOutputConfig{} if r.ko.Spec.TensorBoardOutputConfig.LocalPath != nil { - f17.SetLocalPath(*r.ko.Spec.TensorBoardOutputConfig.LocalPath) + f16.SetLocalPath(*r.ko.Spec.TensorBoardOutputConfig.LocalPath) } if r.ko.Spec.TensorBoardOutputConfig.S3OutputPath != nil { - f17.SetS3OutputPath(*r.ko.Spec.TensorBoardOutputConfig.S3OutputPath) + f16.SetS3OutputPath(*r.ko.Spec.TensorBoardOutputConfig.S3OutputPath) } - res.SetTensorBoardOutputConfig(f17) + res.SetTensorBoardOutputConfig(f16) } if r.ko.Spec.TrainingJobName != nil { res.SetTrainingJobName(*r.ko.Spec.TrainingJobName) } if r.ko.Spec.VPCConfig != nil { - f19 := &svcsdk.VpcConfig{} + f18 := &svcsdk.VpcConfig{} if r.ko.Spec.VPCConfig.SecurityGroupIDs != nil { - f19f0 := []*string{} - for _, f19f0iter := range r.ko.Spec.VPCConfig.SecurityGroupIDs { - var f19f0elem string - f19f0elem = *f19f0iter - f19f0 = append(f19f0, &f19f0elem) + f18f0 := []*string{} + for _, f18f0iter := range r.ko.Spec.VPCConfig.SecurityGroupIDs { + var f18f0elem string + f18f0elem = *f18f0iter + f18f0 = append(f18f0, &f18f0elem) } - f19.SetSecurityGroupIds(f19f0) + f18.SetSecurityGroupIds(f18f0) } if r.ko.Spec.VPCConfig.Subnets != nil { - f19f1 := []*string{} - for _, f19f1iter := range r.ko.Spec.VPCConfig.Subnets { - var f19f1elem string - f19f1elem = *f19f1iter - f19f1 = append(f19f1, &f19f1elem) + f18f1 := []*string{} + for _, f18f1iter := range r.ko.Spec.VPCConfig.Subnets { + var f18f1elem string + f18f1elem = *f18f1iter + f18f1 = append(f18f1, &f18f1elem) } - f19.SetSubnets(f19f1) + f18.SetSubnets(f18f1) } - res.SetVpcConfig(f19) + res.SetVpcConfig(f18) } return res, nil @@ -872,7 +902,7 @@ func (rm *resourceManager) sdkUpdate( ctx context.Context, desired *resource, latest *resource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (*resource, error) { // TODO(jaypipes): Figure this out... return nil, ackerr.NotImplemented diff --git a/pkg/resource/transform_job/delta.go b/pkg/resource/transform_job/delta.go new file mode 100644 index 00000000..863eed13 --- /dev/null +++ b/pkg/resource/transform_job/delta.go @@ -0,0 +1,251 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +// Code generated by ack-generate. DO NOT EDIT. 
+ +package transform_job + +import ( + ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" +) + +// newResourceDelta returns a new `ackcompare.Delta` used to compare two +// resources +func newResourceDelta( + a *resource, + b *resource, +) *ackcompare.Delta { + delta := ackcompare.NewDelta() + if (a == nil && b != nil) || + (a != nil && b == nil) { + delta.Add("", a, b) + return delta + } + + if ackcompare.HasNilDifference(a.ko.Spec.BatchStrategy, b.ko.Spec.BatchStrategy) { + delta.Add("Spec.BatchStrategy", a.ko.Spec.BatchStrategy, b.ko.Spec.BatchStrategy) + } else if a.ko.Spec.BatchStrategy != nil && b.ko.Spec.BatchStrategy != nil { + if *a.ko.Spec.BatchStrategy != *b.ko.Spec.BatchStrategy { + delta.Add("Spec.BatchStrategy", a.ko.Spec.BatchStrategy, b.ko.Spec.BatchStrategy) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DataProcessing, b.ko.Spec.DataProcessing) { + delta.Add("Spec.DataProcessing", a.ko.Spec.DataProcessing, b.ko.Spec.DataProcessing) + } else if a.ko.Spec.DataProcessing != nil && b.ko.Spec.DataProcessing != nil { + if ackcompare.HasNilDifference(a.ko.Spec.DataProcessing.InputFilter, b.ko.Spec.DataProcessing.InputFilter) { + delta.Add("Spec.DataProcessing.InputFilter", a.ko.Spec.DataProcessing.InputFilter, b.ko.Spec.DataProcessing.InputFilter) + } else if a.ko.Spec.DataProcessing.InputFilter != nil && b.ko.Spec.DataProcessing.InputFilter != nil { + if *a.ko.Spec.DataProcessing.InputFilter != *b.ko.Spec.DataProcessing.InputFilter { + delta.Add("Spec.DataProcessing.InputFilter", a.ko.Spec.DataProcessing.InputFilter, b.ko.Spec.DataProcessing.InputFilter) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DataProcessing.JoinSource, b.ko.Spec.DataProcessing.JoinSource) { + delta.Add("Spec.DataProcessing.JoinSource", a.ko.Spec.DataProcessing.JoinSource, b.ko.Spec.DataProcessing.JoinSource) + } else if a.ko.Spec.DataProcessing.JoinSource != nil && b.ko.Spec.DataProcessing.JoinSource != nil { + if *a.ko.Spec.DataProcessing.JoinSource != *b.ko.Spec.DataProcessing.JoinSource { + delta.Add("Spec.DataProcessing.JoinSource", a.ko.Spec.DataProcessing.JoinSource, b.ko.Spec.DataProcessing.JoinSource) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.DataProcessing.OutputFilter, b.ko.Spec.DataProcessing.OutputFilter) { + delta.Add("Spec.DataProcessing.OutputFilter", a.ko.Spec.DataProcessing.OutputFilter, b.ko.Spec.DataProcessing.OutputFilter) + } else if a.ko.Spec.DataProcessing.OutputFilter != nil && b.ko.Spec.DataProcessing.OutputFilter != nil { + if *a.ko.Spec.DataProcessing.OutputFilter != *b.ko.Spec.DataProcessing.OutputFilter { + delta.Add("Spec.DataProcessing.OutputFilter", a.ko.Spec.DataProcessing.OutputFilter, b.ko.Spec.DataProcessing.OutputFilter) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.Environment, b.ko.Spec.Environment) { + delta.Add("Spec.Environment", a.ko.Spec.Environment, b.ko.Spec.Environment) + } else if a.ko.Spec.Environment != nil && b.ko.Spec.Environment != nil { + if !ackcompare.MapStringStringPEqual(a.ko.Spec.Environment, b.ko.Spec.Environment) { + delta.Add("Spec.Environment", a.ko.Spec.Environment, b.ko.Spec.Environment) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig, b.ko.Spec.ExperimentConfig) { + delta.Add("Spec.ExperimentConfig", a.ko.Spec.ExperimentConfig, b.ko.Spec.ExperimentConfig) + } else if a.ko.Spec.ExperimentConfig != nil && b.ko.Spec.ExperimentConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) { + 
delta.Add("Spec.ExperimentConfig.ExperimentName", a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) + } else if a.ko.Spec.ExperimentConfig.ExperimentName != nil && b.ko.Spec.ExperimentConfig.ExperimentName != nil { + if *a.ko.Spec.ExperimentConfig.ExperimentName != *b.ko.Spec.ExperimentConfig.ExperimentName { + delta.Add("Spec.ExperimentConfig.ExperimentName", a.ko.Spec.ExperimentConfig.ExperimentName, b.ko.Spec.ExperimentConfig.ExperimentName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) { + delta.Add("Spec.ExperimentConfig.TrialComponentDisplayName", a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) + } else if a.ko.Spec.ExperimentConfig.TrialComponentDisplayName != nil && b.ko.Spec.ExperimentConfig.TrialComponentDisplayName != nil { + if *a.ko.Spec.ExperimentConfig.TrialComponentDisplayName != *b.ko.Spec.ExperimentConfig.TrialComponentDisplayName { + delta.Add("Spec.ExperimentConfig.TrialComponentDisplayName", a.ko.Spec.ExperimentConfig.TrialComponentDisplayName, b.ko.Spec.ExperimentConfig.TrialComponentDisplayName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) { + delta.Add("Spec.ExperimentConfig.TrialName", a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) + } else if a.ko.Spec.ExperimentConfig.TrialName != nil && b.ko.Spec.ExperimentConfig.TrialName != nil { + if *a.ko.Spec.ExperimentConfig.TrialName != *b.ko.Spec.ExperimentConfig.TrialName { + delta.Add("Spec.ExperimentConfig.TrialName", a.ko.Spec.ExperimentConfig.TrialName, b.ko.Spec.ExperimentConfig.TrialName) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.MaxConcurrentTransforms, b.ko.Spec.MaxConcurrentTransforms) { + delta.Add("Spec.MaxConcurrentTransforms", a.ko.Spec.MaxConcurrentTransforms, b.ko.Spec.MaxConcurrentTransforms) + } else if a.ko.Spec.MaxConcurrentTransforms != nil && b.ko.Spec.MaxConcurrentTransforms != nil { + if *a.ko.Spec.MaxConcurrentTransforms != *b.ko.Spec.MaxConcurrentTransforms { + delta.Add("Spec.MaxConcurrentTransforms", a.ko.Spec.MaxConcurrentTransforms, b.ko.Spec.MaxConcurrentTransforms) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.MaxPayloadInMB, b.ko.Spec.MaxPayloadInMB) { + delta.Add("Spec.MaxPayloadInMB", a.ko.Spec.MaxPayloadInMB, b.ko.Spec.MaxPayloadInMB) + } else if a.ko.Spec.MaxPayloadInMB != nil && b.ko.Spec.MaxPayloadInMB != nil { + if *a.ko.Spec.MaxPayloadInMB != *b.ko.Spec.MaxPayloadInMB { + delta.Add("Spec.MaxPayloadInMB", a.ko.Spec.MaxPayloadInMB, b.ko.Spec.MaxPayloadInMB) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ModelClientConfig, b.ko.Spec.ModelClientConfig) { + delta.Add("Spec.ModelClientConfig", a.ko.Spec.ModelClientConfig, b.ko.Spec.ModelClientConfig) + } else if a.ko.Spec.ModelClientConfig != nil && b.ko.Spec.ModelClientConfig != nil { + if ackcompare.HasNilDifference(a.ko.Spec.ModelClientConfig.InvocationsMaxRetries, b.ko.Spec.ModelClientConfig.InvocationsMaxRetries) { + delta.Add("Spec.ModelClientConfig.InvocationsMaxRetries", a.ko.Spec.ModelClientConfig.InvocationsMaxRetries, b.ko.Spec.ModelClientConfig.InvocationsMaxRetries) + } else if a.ko.Spec.ModelClientConfig.InvocationsMaxRetries != nil && b.ko.Spec.ModelClientConfig.InvocationsMaxRetries != nil { + if *a.ko.Spec.ModelClientConfig.InvocationsMaxRetries != *b.ko.Spec.ModelClientConfig.InvocationsMaxRetries { + 
delta.Add("Spec.ModelClientConfig.InvocationsMaxRetries", a.ko.Spec.ModelClientConfig.InvocationsMaxRetries, b.ko.Spec.ModelClientConfig.InvocationsMaxRetries) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds, b.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds) { + delta.Add("Spec.ModelClientConfig.InvocationsTimeoutInSeconds", a.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds, b.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds) + } else if a.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds != nil && b.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds != nil { + if *a.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds != *b.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds { + delta.Add("Spec.ModelClientConfig.InvocationsTimeoutInSeconds", a.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds, b.ko.Spec.ModelClientConfig.InvocationsTimeoutInSeconds) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.ModelName, b.ko.Spec.ModelName) { + delta.Add("Spec.ModelName", a.ko.Spec.ModelName, b.ko.Spec.ModelName) + } else if a.ko.Spec.ModelName != nil && b.ko.Spec.ModelName != nil { + if *a.ko.Spec.ModelName != *b.ko.Spec.ModelName { + delta.Add("Spec.ModelName", a.ko.Spec.ModelName, b.ko.Spec.ModelName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformInput, b.ko.Spec.TransformInput) { + delta.Add("Spec.TransformInput", a.ko.Spec.TransformInput, b.ko.Spec.TransformInput) + } else if a.ko.Spec.TransformInput != nil && b.ko.Spec.TransformInput != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TransformInput.CompressionType, b.ko.Spec.TransformInput.CompressionType) { + delta.Add("Spec.TransformInput.CompressionType", a.ko.Spec.TransformInput.CompressionType, b.ko.Spec.TransformInput.CompressionType) + } else if a.ko.Spec.TransformInput.CompressionType != nil && b.ko.Spec.TransformInput.CompressionType != nil { + if *a.ko.Spec.TransformInput.CompressionType != *b.ko.Spec.TransformInput.CompressionType { + delta.Add("Spec.TransformInput.CompressionType", a.ko.Spec.TransformInput.CompressionType, b.ko.Spec.TransformInput.CompressionType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformInput.ContentType, b.ko.Spec.TransformInput.ContentType) { + delta.Add("Spec.TransformInput.ContentType", a.ko.Spec.TransformInput.ContentType, b.ko.Spec.TransformInput.ContentType) + } else if a.ko.Spec.TransformInput.ContentType != nil && b.ko.Spec.TransformInput.ContentType != nil { + if *a.ko.Spec.TransformInput.ContentType != *b.ko.Spec.TransformInput.ContentType { + delta.Add("Spec.TransformInput.ContentType", a.ko.Spec.TransformInput.ContentType, b.ko.Spec.TransformInput.ContentType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformInput.DataSource, b.ko.Spec.TransformInput.DataSource) { + delta.Add("Spec.TransformInput.DataSource", a.ko.Spec.TransformInput.DataSource, b.ko.Spec.TransformInput.DataSource) + } else if a.ko.Spec.TransformInput.DataSource != nil && b.ko.Spec.TransformInput.DataSource != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TransformInput.DataSource.S3DataSource, b.ko.Spec.TransformInput.DataSource.S3DataSource) { + delta.Add("Spec.TransformInput.DataSource.S3DataSource", a.ko.Spec.TransformInput.DataSource.S3DataSource, b.ko.Spec.TransformInput.DataSource.S3DataSource) + } else if a.ko.Spec.TransformInput.DataSource.S3DataSource != nil && b.ko.Spec.TransformInput.DataSource.S3DataSource != nil { + if 
ackcompare.HasNilDifference(a.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType, b.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType) { + delta.Add("Spec.TransformInput.DataSource.S3DataSource.S3DataType", a.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType, b.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType) + } else if a.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType != nil && b.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType != nil { + if *a.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType != *b.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType { + delta.Add("Spec.TransformInput.DataSource.S3DataSource.S3DataType", a.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType, b.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI, b.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI) { + delta.Add("Spec.TransformInput.DataSource.S3DataSource.S3URI", a.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI, b.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI) + } else if a.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI != nil && b.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI != nil { + if *a.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI != *b.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI { + delta.Add("Spec.TransformInput.DataSource.S3DataSource.S3URI", a.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI, b.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI) + } + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformInput.SplitType, b.ko.Spec.TransformInput.SplitType) { + delta.Add("Spec.TransformInput.SplitType", a.ko.Spec.TransformInput.SplitType, b.ko.Spec.TransformInput.SplitType) + } else if a.ko.Spec.TransformInput.SplitType != nil && b.ko.Spec.TransformInput.SplitType != nil { + if *a.ko.Spec.TransformInput.SplitType != *b.ko.Spec.TransformInput.SplitType { + delta.Add("Spec.TransformInput.SplitType", a.ko.Spec.TransformInput.SplitType, b.ko.Spec.TransformInput.SplitType) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformJobName, b.ko.Spec.TransformJobName) { + delta.Add("Spec.TransformJobName", a.ko.Spec.TransformJobName, b.ko.Spec.TransformJobName) + } else if a.ko.Spec.TransformJobName != nil && b.ko.Spec.TransformJobName != nil { + if *a.ko.Spec.TransformJobName != *b.ko.Spec.TransformJobName { + delta.Add("Spec.TransformJobName", a.ko.Spec.TransformJobName, b.ko.Spec.TransformJobName) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformOutput, b.ko.Spec.TransformOutput) { + delta.Add("Spec.TransformOutput", a.ko.Spec.TransformOutput, b.ko.Spec.TransformOutput) + } else if a.ko.Spec.TransformOutput != nil && b.ko.Spec.TransformOutput != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TransformOutput.Accept, b.ko.Spec.TransformOutput.Accept) { + delta.Add("Spec.TransformOutput.Accept", a.ko.Spec.TransformOutput.Accept, b.ko.Spec.TransformOutput.Accept) + } else if a.ko.Spec.TransformOutput.Accept != nil && b.ko.Spec.TransformOutput.Accept != nil { + if *a.ko.Spec.TransformOutput.Accept != *b.ko.Spec.TransformOutput.Accept { + delta.Add("Spec.TransformOutput.Accept", a.ko.Spec.TransformOutput.Accept, b.ko.Spec.TransformOutput.Accept) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformOutput.AssembleWith, b.ko.Spec.TransformOutput.AssembleWith) { + delta.Add("Spec.TransformOutput.AssembleWith", a.ko.Spec.TransformOutput.AssembleWith, 
b.ko.Spec.TransformOutput.AssembleWith) + } else if a.ko.Spec.TransformOutput.AssembleWith != nil && b.ko.Spec.TransformOutput.AssembleWith != nil { + if *a.ko.Spec.TransformOutput.AssembleWith != *b.ko.Spec.TransformOutput.AssembleWith { + delta.Add("Spec.TransformOutput.AssembleWith", a.ko.Spec.TransformOutput.AssembleWith, b.ko.Spec.TransformOutput.AssembleWith) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformOutput.KMSKeyID, b.ko.Spec.TransformOutput.KMSKeyID) { + delta.Add("Spec.TransformOutput.KMSKeyID", a.ko.Spec.TransformOutput.KMSKeyID, b.ko.Spec.TransformOutput.KMSKeyID) + } else if a.ko.Spec.TransformOutput.KMSKeyID != nil && b.ko.Spec.TransformOutput.KMSKeyID != nil { + if *a.ko.Spec.TransformOutput.KMSKeyID != *b.ko.Spec.TransformOutput.KMSKeyID { + delta.Add("Spec.TransformOutput.KMSKeyID", a.ko.Spec.TransformOutput.KMSKeyID, b.ko.Spec.TransformOutput.KMSKeyID) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformOutput.S3OutputPath, b.ko.Spec.TransformOutput.S3OutputPath) { + delta.Add("Spec.TransformOutput.S3OutputPath", a.ko.Spec.TransformOutput.S3OutputPath, b.ko.Spec.TransformOutput.S3OutputPath) + } else if a.ko.Spec.TransformOutput.S3OutputPath != nil && b.ko.Spec.TransformOutput.S3OutputPath != nil { + if *a.ko.Spec.TransformOutput.S3OutputPath != *b.ko.Spec.TransformOutput.S3OutputPath { + delta.Add("Spec.TransformOutput.S3OutputPath", a.ko.Spec.TransformOutput.S3OutputPath, b.ko.Spec.TransformOutput.S3OutputPath) + } + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformResources, b.ko.Spec.TransformResources) { + delta.Add("Spec.TransformResources", a.ko.Spec.TransformResources, b.ko.Spec.TransformResources) + } else if a.ko.Spec.TransformResources != nil && b.ko.Spec.TransformResources != nil { + if ackcompare.HasNilDifference(a.ko.Spec.TransformResources.InstanceCount, b.ko.Spec.TransformResources.InstanceCount) { + delta.Add("Spec.TransformResources.InstanceCount", a.ko.Spec.TransformResources.InstanceCount, b.ko.Spec.TransformResources.InstanceCount) + } else if a.ko.Spec.TransformResources.InstanceCount != nil && b.ko.Spec.TransformResources.InstanceCount != nil { + if *a.ko.Spec.TransformResources.InstanceCount != *b.ko.Spec.TransformResources.InstanceCount { + delta.Add("Spec.TransformResources.InstanceCount", a.ko.Spec.TransformResources.InstanceCount, b.ko.Spec.TransformResources.InstanceCount) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformResources.InstanceType, b.ko.Spec.TransformResources.InstanceType) { + delta.Add("Spec.TransformResources.InstanceType", a.ko.Spec.TransformResources.InstanceType, b.ko.Spec.TransformResources.InstanceType) + } else if a.ko.Spec.TransformResources.InstanceType != nil && b.ko.Spec.TransformResources.InstanceType != nil { + if *a.ko.Spec.TransformResources.InstanceType != *b.ko.Spec.TransformResources.InstanceType { + delta.Add("Spec.TransformResources.InstanceType", a.ko.Spec.TransformResources.InstanceType, b.ko.Spec.TransformResources.InstanceType) + } + } + if ackcompare.HasNilDifference(a.ko.Spec.TransformResources.VolumeKMSKeyID, b.ko.Spec.TransformResources.VolumeKMSKeyID) { + delta.Add("Spec.TransformResources.VolumeKMSKeyID", a.ko.Spec.TransformResources.VolumeKMSKeyID, b.ko.Spec.TransformResources.VolumeKMSKeyID) + } else if a.ko.Spec.TransformResources.VolumeKMSKeyID != nil && b.ko.Spec.TransformResources.VolumeKMSKeyID != nil { + if *a.ko.Spec.TransformResources.VolumeKMSKeyID != *b.ko.Spec.TransformResources.VolumeKMSKeyID { + 
delta.Add("Spec.TransformResources.VolumeKMSKeyID", a.ko.Spec.TransformResources.VolumeKMSKeyID, b.ko.Spec.TransformResources.VolumeKMSKeyID) + } + } + } + + return delta +} diff --git a/pkg/resource/transform_job/descriptor.go b/pkg/resource/transform_job/descriptor.go index 75dc6f99..80f8dd73 100644 --- a/pkg/resource/transform_job/descriptor.go +++ b/pkg/resource/transform_job/descriptor.go @@ -18,8 +18,6 @@ package transform_job import ( ackcompare "github.com/aws-controllers-k8s/runtime/pkg/compare" acktypes "github.com/aws-controllers-k8s/runtime/pkg/types" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sapirt "k8s.io/apimachinery/pkg/runtime" k8sctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -65,37 +63,10 @@ func (d *resourceDescriptor) ResourceFromRuntimeObject( } } -// Equal returns true if the two supplied AWSResources have the same content. -// The underlying types of the two supplied AWSResources should be the same. In -// other words, the Equal() method should be called with the same concrete -// implementing AWSResource type -func (d *resourceDescriptor) Equal( - a acktypes.AWSResource, - b acktypes.AWSResource, -) bool { - ac := a.(*resource) - bc := b.(*resource) - opts := []cmp.Option{cmpopts.EquateEmpty()} - return cmp.Equal(ac.ko, bc.ko, opts...) -} - -// Diff returns a Reporter which provides the difference between two supplied -// AWSResources. The underlying types of the two supplied AWSResources should -// be the same. In other words, the Diff() method should be called with the -// same concrete implementing AWSResource type -func (d *resourceDescriptor) Diff( - a acktypes.AWSResource, - b acktypes.AWSResource, -) *ackcompare.Reporter { - ac := a.(*resource) - bc := b.(*resource) - var diffReporter ackcompare.Reporter - opts := []cmp.Option{ - cmp.Reporter(&diffReporter), - cmp.AllowUnexported(svcapitypes.TransformJob{}), - } - cmp.Equal(ac.ko, bc.ko, opts...) - return &diffReporter +// Delta returns an `ackcompare.Delta` object containing the difference between +// one `AWSResource` and another. +func (d *resourceDescriptor) Delta(a, b acktypes.AWSResource) *ackcompare.Delta { + return newResourceDelta(a.(*resource), b.(*resource)) } // UpdateCRStatus accepts an AWSResource object and changes the Status diff --git a/pkg/resource/transform_job/manager.go b/pkg/resource/transform_job/manager.go index 0ac488c0..9685dfb5 100644 --- a/pkg/resource/transform_job/manager.go +++ b/pkg/resource/transform_job/manager.go @@ -125,7 +125,7 @@ func (rm *resourceManager) Update( ctx context.Context, resDesired acktypes.AWSResource, resLatest acktypes.AWSResource, - diffReporter *ackcompare.Reporter, + delta *ackcompare.Delta, ) (acktypes.AWSResource, error) { desired := rm.concreteResource(resDesired) latest := rm.concreteResource(resLatest) @@ -133,7 +133,7 @@ func (rm *resourceManager) Update( // Should never happen... if it does, it's buggy code. 
panic("resource manager's Update() method received resource with nil CR object") } - updated, err := rm.sdkUpdate(ctx, desired, latest, diffReporter) + updated, err := rm.sdkUpdate(ctx, desired, latest, delta) if err != nil { return rm.onError(latest, err) } diff --git a/pkg/resource/transform_job/resource.go b/pkg/resource/transform_job/resource.go index 697301b8..bd954c38 100644 --- a/pkg/resource/transform_job/resource.go +++ b/pkg/resource/transform_job/resource.go @@ -24,7 +24,7 @@ import ( svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1" ) -// resource implements the `aws-service-operator-k8s/pkg/types.AWSResource` +// resource implements the `aws-controller-k8s/runtime/pkg/types.AWSResource` // interface type resource struct { // The Kubernetes-native CR representing the resource diff --git a/pkg/resource/transform_job/sdk.go b/pkg/resource/transform_job/sdk.go index 3f4fcc16..0ae9b5dc 100644 --- a/pkg/resource/transform_job/sdk.go +++ b/pkg/resource/transform_job/sdk.go @@ -73,6 +73,8 @@ func (rm *resourceManager) sdkFind( if resp.BatchStrategy != nil { ko.Spec.BatchStrategy = resp.BatchStrategy + } else { + ko.Spec.BatchStrategy = nil } if resp.DataProcessing != nil { f3 := &svcapitypes.DataProcessing{} @@ -86,6 +88,8 @@ func (rm *resourceManager) sdkFind( f3.OutputFilter = resp.DataProcessing.OutputFilter } ko.Spec.DataProcessing = f3 + } else { + ko.Spec.DataProcessing = nil } if resp.Environment != nil { f4 := map[string]*string{} @@ -95,6 +99,8 @@ func (rm *resourceManager) sdkFind( f4[f4key] = &f4val } ko.Spec.Environment = f4 + } else { + ko.Spec.Environment = nil } if resp.ExperimentConfig != nil { f5 := &svcapitypes.ExperimentConfig{} @@ -108,15 +114,23 @@ func (rm *resourceManager) sdkFind( f5.TrialName = resp.ExperimentConfig.TrialName } ko.Spec.ExperimentConfig = f5 + } else { + ko.Spec.ExperimentConfig = nil } if resp.FailureReason != nil { ko.Status.FailureReason = resp.FailureReason + } else { + ko.Status.FailureReason = nil } if resp.MaxConcurrentTransforms != nil { ko.Spec.MaxConcurrentTransforms = resp.MaxConcurrentTransforms + } else { + ko.Spec.MaxConcurrentTransforms = nil } if resp.MaxPayloadInMB != nil { ko.Spec.MaxPayloadInMB = resp.MaxPayloadInMB + } else { + ko.Spec.MaxPayloadInMB = nil } if resp.ModelClientConfig != nil { f10 := &svcapitypes.ModelClientConfig{} @@ -127,9 +141,13 @@ func (rm *resourceManager) sdkFind( f10.InvocationsTimeoutInSeconds = resp.ModelClientConfig.InvocationsTimeoutInSeconds } ko.Spec.ModelClientConfig = f10 + } else { + ko.Spec.ModelClientConfig = nil } if resp.ModelName != nil { ko.Spec.ModelName = resp.ModelName + } else { + ko.Spec.ModelName = nil } if resp.TransformInput != nil { f13 := &svcapitypes.TransformInput{} @@ -157,6 +175,8 @@ func (rm *resourceManager) sdkFind( f13.SplitType = resp.TransformInput.SplitType } ko.Spec.TransformInput = f13 + } else { + ko.Spec.TransformInput = nil } if ko.Status.ACKResourceMetadata == nil { ko.Status.ACKResourceMetadata = &ackv1alpha1.ResourceMetadata{} @@ -167,9 +187,13 @@ func (rm *resourceManager) sdkFind( } if resp.TransformJobName != nil { ko.Spec.TransformJobName = resp.TransformJobName + } else { + ko.Spec.TransformJobName = nil } if resp.TransformJobStatus != nil { ko.Status.TransformJobStatus = resp.TransformJobStatus + } else { + ko.Status.TransformJobStatus = nil } if resp.TransformOutput != nil { f17 := &svcapitypes.TransformOutput{} @@ -186,6 +210,8 @@ func (rm *resourceManager) sdkFind( f17.S3OutputPath = resp.TransformOutput.S3OutputPath } 
@@ -328,78 +356,64 @@ func (rm *resourceManager) newCreateRequestPayload(
 	if r.ko.Spec.ModelName != nil {
 		res.SetModelName(*r.ko.Spec.ModelName)
 	}
-	if r.ko.Spec.Tags != nil {
-		f8 := []*svcsdk.Tag{}
-		for _, f8iter := range r.ko.Spec.Tags {
-			f8elem := &svcsdk.Tag{}
-			if f8iter.Key != nil {
-				f8elem.SetKey(*f8iter.Key)
-			}
-			if f8iter.Value != nil {
-				f8elem.SetValue(*f8iter.Value)
-			}
-			f8 = append(f8, f8elem)
-		}
-		res.SetTags(f8)
-	}
 	if r.ko.Spec.TransformInput != nil {
-		f9 := &svcsdk.TransformInput{}
+		f8 := &svcsdk.TransformInput{}
 		if r.ko.Spec.TransformInput.CompressionType != nil {
-			f9.SetCompressionType(*r.ko.Spec.TransformInput.CompressionType)
+			f8.SetCompressionType(*r.ko.Spec.TransformInput.CompressionType)
 		}
 		if r.ko.Spec.TransformInput.ContentType != nil {
-			f9.SetContentType(*r.ko.Spec.TransformInput.ContentType)
+			f8.SetContentType(*r.ko.Spec.TransformInput.ContentType)
 		}
 		if r.ko.Spec.TransformInput.DataSource != nil {
-			f9f2 := &svcsdk.TransformDataSource{}
+			f8f2 := &svcsdk.TransformDataSource{}
 			if r.ko.Spec.TransformInput.DataSource.S3DataSource != nil {
-				f9f2f0 := &svcsdk.TransformS3DataSource{}
+				f8f2f0 := &svcsdk.TransformS3DataSource{}
 				if r.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType != nil {
-					f9f2f0.SetS3DataType(*r.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType)
+					f8f2f0.SetS3DataType(*r.ko.Spec.TransformInput.DataSource.S3DataSource.S3DataType)
 				}
 				if r.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI != nil {
-					f9f2f0.SetS3Uri(*r.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI)
+					f8f2f0.SetS3Uri(*r.ko.Spec.TransformInput.DataSource.S3DataSource.S3URI)
 				}
-				f9f2.SetS3DataSource(f9f2f0)
+				f8f2.SetS3DataSource(f8f2f0)
 			}
-			f9.SetDataSource(f9f2)
+			f8.SetDataSource(f8f2)
 		}
 		if r.ko.Spec.TransformInput.SplitType != nil {
-			f9.SetSplitType(*r.ko.Spec.TransformInput.SplitType)
+			f8.SetSplitType(*r.ko.Spec.TransformInput.SplitType)
 		}
-		res.SetTransformInput(f9)
+		res.SetTransformInput(f8)
 	}
 	if r.ko.Spec.TransformJobName != nil {
 		res.SetTransformJobName(*r.ko.Spec.TransformJobName)
 	}
 	if r.ko.Spec.TransformOutput != nil {
-		f11 := &svcsdk.TransformOutput{}
+		f10 := &svcsdk.TransformOutput{}
 		if r.ko.Spec.TransformOutput.Accept != nil {
-			f11.SetAccept(*r.ko.Spec.TransformOutput.Accept)
+			f10.SetAccept(*r.ko.Spec.TransformOutput.Accept)
 		}
 		if r.ko.Spec.TransformOutput.AssembleWith != nil {
-			f11.SetAssembleWith(*r.ko.Spec.TransformOutput.AssembleWith)
+			f10.SetAssembleWith(*r.ko.Spec.TransformOutput.AssembleWith)
 		}
 		if r.ko.Spec.TransformOutput.KMSKeyID != nil {
-			f11.SetKmsKeyId(*r.ko.Spec.TransformOutput.KMSKeyID)
+			f10.SetKmsKeyId(*r.ko.Spec.TransformOutput.KMSKeyID)
 		}
 		if r.ko.Spec.TransformOutput.S3OutputPath != nil {
-			f11.SetS3OutputPath(*r.ko.Spec.TransformOutput.S3OutputPath)
+			f10.SetS3OutputPath(*r.ko.Spec.TransformOutput.S3OutputPath)
 		}
-		res.SetTransformOutput(f11)
+		res.SetTransformOutput(f10)
 	}
 	if r.ko.Spec.TransformResources != nil {
-		f12 := &svcsdk.TransformResources{}
+		f11 := &svcsdk.TransformResources{}
 		if r.ko.Spec.TransformResources.InstanceCount != nil {
-			f12.SetInstanceCount(*r.ko.Spec.TransformResources.InstanceCount)
+			f11.SetInstanceCount(*r.ko.Spec.TransformResources.InstanceCount)
 		}
 		if r.ko.Spec.TransformResources.InstanceType != nil {
-			f12.SetInstanceType(*r.ko.Spec.TransformResources.InstanceType)
+			f11.SetInstanceType(*r.ko.Spec.TransformResources.InstanceType)
 		}
 		if r.ko.Spec.TransformResources.VolumeKMSKeyID != nil {
-			f12.SetVolumeKmsKeyId(*r.ko.Spec.TransformResources.VolumeKMSKeyID)
+			f11.SetVolumeKmsKeyId(*r.ko.Spec.TransformResources.VolumeKMSKeyID)
 		}
-		res.SetTransformResources(f12)
+		res.SetTransformResources(f11)
 	}
 
 	return res, nil
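The newCreateRequestPayload hunk drops the Tags block and renumbers the temporary fN variables, but the mapping pattern itself is unchanged: every optional Spec pointer is nil-checked before the corresponding SDK setter is called. A short illustrative helper in that style follows; the helper name is hypothetical, while the svcsdk setters are the ones the generated code already uses.

package main

import (
	"fmt"

	svcsdk "github.com/aws/aws-sdk-go/service/sagemaker"
)

// buildTransformResources shows the nil-guarded Spec -> SDK mapping used by the
// generated newCreateRequestPayload (illustrative helper, not generated code).
func buildTransformResources(instanceCount *int64, instanceType, volumeKMSKeyID *string) *svcsdk.TransformResources {
	res := &svcsdk.TransformResources{}
	if instanceCount != nil {
		res.SetInstanceCount(*instanceCount)
	}
	if instanceType != nil {
		res.SetInstanceType(*instanceType)
	}
	if volumeKMSKeyID != nil {
		res.SetVolumeKmsKeyId(*volumeKMSKeyID)
	}
	return res
}

func main() {
	count := int64(1)
	iType := "ml.m5.large"
	// VolumeKmsKeyId is simply omitted when the CR leaves the field unset.
	fmt.Println(buildTransformResources(&count, &iType, nil))
}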
@@ -411,7 +425,7 @@ func (rm *resourceManager) sdkUpdate(
 	ctx context.Context,
 	desired *resource,
 	latest *resource,
-	diffReporter *ackcompare.Reporter,
+	delta *ackcompare.Delta,
 ) (*resource, error) {
 	// TODO(jaypipes): Figure this out...
 	return nil, ackerr.NotImplemented
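sdkUpdate now receives the computed Delta instead of a Reporter, even though its body is still a TODO here. A hypothetical unit test, not part of this change, showing how the generated newResourceDelta can be exercised directly; it assumes ackcompare.Delta's DifferentAt helper, which other ACK controllers rely on.

package transform_job

import (
	"testing"

	svcapitypes "github.com/aws-controllers-k8s/sagemaker-controller/apis/v1alpha1"
)

// TestDeltaDetectsKMSKeyChange is a hypothetical test: it confirms a changed
// output KMS key is reported under the field path the generated code records.
func TestDeltaDetectsKMSKeyChange(t *testing.T) {
	oldKey := "alias/old-key"
	newKey := "alias/new-key"
	desired := &resource{ko: &svcapitypes.TransformJob{}}
	latest := &resource{ko: &svcapitypes.TransformJob{}}
	desired.ko.Spec.TransformOutput = &svcapitypes.TransformOutput{KMSKeyID: &oldKey}
	latest.ko.Spec.TransformOutput = &svcapitypes.TransformOutput{KMSKeyID: &newKey}

	delta := newResourceDelta(desired, latest)
	if !delta.DifferentAt("Spec.TransformOutput.KMSKeyID") {
		t.Fatalf("expected a difference at Spec.TransformOutput.KMSKeyID")
	}
}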