diff --git a/api/bases/test.openstack.org_ansibletests.yaml b/api/bases/test.openstack.org_ansibletests.yaml new file mode 100644 index 00000000..5fb0c8c3 --- /dev/null +++ b/api/bases/test.openstack.org_ansibletests.yaml @@ -0,0 +1,288 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: ansibletests.test.openstack.org +spec: + group: test.openstack.org + names: + kind: AnsibleTests + listKind: AnsibleTestsList + plural: ansibletests + singular: ansibletests + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AnsibleTestsStatus is the Schema for the AnsibleTestsStatus API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AnsibleTestsSpec defines the desired state of AnsibleTests + properties: + ansibleCollections: + default: "" + description: AnsibleCollections - extra ansible collections to instal + in additionn to the ones exist in the requirements.yaml + type: string + ansibleExtraVars: + default: "" + description: AnsibleExtraVars - string to pass parameters to ansible + using + type: string + ansibleGitRepo: + default: "" + description: AnsibleGitRepo - git repo to clone into container + type: string + ansibleInventory: + default: "" + description: AnsibleInventory - string that contains the inventory + file content + type: string + ansiblePlaybookPath: + default: "" + description: AnsiblePlaybookPath - path to ansible playbook + type: string + ansibleVarFiles: + default: "" + description: AnsibleVarFiles - interface to create ansible var files + Those get added to the + type: string + backoffLimit: + default: 0 + description: BackoffLimimt allows to define the maximum number of + retried executions (defaults to 6). + format: int32 + type: integer + computeSSHKeySecretName: + default: dataplane-ansible-ssh-private-key-secret + description: ComputeSSHKeySecretName is the name of the k8s secret + that contains an ssh key for computes. The key is mounted to ~/.ssh/id_ecdsa + in the ansible pod + type: string + containerImage: + default: quay.io/podified-antelope-centos9/openstack-ansible-tests:current-podified + description: Container image for AnsibleTests + type: string + debug: + default: false + description: Run ansible playbook with -vvvv + type: boolean + extraMounts: + description: Extra configmaps for mounting in the pod. + items: + properties: + Name: + description: The name of an existing config map for mounting. + type: string + mountPath: + description: Path within the container at which the volume should + be mounted. + type: string + subPath: + default: "" + description: Config map subpath for mounting, defaults to configmap + root. 
+ type: string + required: + - Name + - mountPath + - subPath + type: object + type: array + openStackConfigMap: + default: openstack-config + description: OpenStackConfigMap is the name of the ConfigMap containing + the clouds.yaml + type: string + openStackConfigSecret: + default: openstack-config-secret + description: OpenStackConfigSecret is the name of the Secret containing + the secure.yaml + type: string + storageClass: + default: local-storage + description: StorageClass used to create PVCs that store the logs + type: string + workflow: + description: A parameter that contains a workflow definition. + items: + properties: + ansibleCollections: + description: AnsibleCollections - extra ansible collections + to instal in additionn to the ones exist in the requirements.yaml + type: string + ansibleExtraVars: + description: AnsibleExtraVars - interface to pass parameters + to ansible using -e + type: string + ansibleGitRepo: + description: AnsibleGitRepo - git repo to clone into container + type: string + ansibleInventory: + description: AnsibleInventory - string that contains the inventory + file content + type: string + ansiblePlaybookPath: + description: AnsiblePlaybookPath - path to ansible playbook + type: string + ansibleVarFiles: + description: AnsibleVarFiles - interface to create ansible var + files Those get added to the service config dir in /etc/test_operator/ + and passed to the ansible command using -e @/etc/test_operator/ + type: string + backoffLimit: + description: BackoffLimimt allows to define the maximum number + of retried executions (defaults to 6). + format: int32 + type: integer + computeSSHKeySecretName: + description: ComputeSSHKeySecretName is the name of the k8s + secret that contains an ssh key for computes. The key is mounted + to ~/.ssh/id_ecdsa in the ansible pod + type: string + containerImage: + description: Container image for AnsibleTests + type: string + debug: + description: Run ansible playbook with -vvvv + type: boolean + extraMounts: + description: Extra configmaps for mounting in the pod + items: + properties: + Name: + description: The name of an existing config map for mounting. + type: string + mountPath: + description: Path within the container at which the volume + should be mounted. + type: string + subPath: + default: "" + description: Config map subpath for mounting, defaults + to configmap root. + type: string + required: + - Name + - mountPath + - subPath + type: object + type: array + openStackConfigMap: + description: OpenStackConfigMap is the name of the ConfigMap + containing the clouds.yaml + type: string + openStackConfigSecret: + description: OpenStackConfigSecret is the name of the Secret + containing the secure.yaml + type: string + stepName: + description: Name of a workflow step. The step name will be + used for example to create a logs directory. + type: string + storageClass: + description: StorageClass used to create PVCs that store the + logs + type: string + workloadSSHKeySecretName: + description: WorkloadSSHKeySecretName is the name of the k8s + secret that contains an ssh key for the ansible workload. + The key is mounted to ~/test_keypair.key in the ansible pod + type: string + required: + - stepName + type: object + type: array + workloadSSHKeySecretName: + default: "" + description: WorkloadSSHKeySecretName is the name of the k8s secret + that contains an ssh key for the ansible workload. 
The key is mounted + to ~/test_keypair.key in the ansible pod + type: string + required: + - computeSSHKeySecretName + - openStackConfigMap + - openStackConfigSecret + - storageClass + - workloadSSHKeySecretName + type: object + status: + description: AnsibleTestsStatus defines the observed state of AnsibleTests + properties: + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: Severity provides a classification of Reason code, + so the current situation is immediately understandable and + could act accordingly. It is meant for situations where Status=False + and it should be indicated if it is just informational, warning + (next reconciliation might fix it) or an error (e.g. DB create + issue and no actions to automatically resolve the issue can/should + be done). For conditions where Status=Unknown or Status=True + the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/api/v1beta1/AnsibleTests_types.go b/api/v1beta1/AnsibleTests_types.go new file mode 100644 index 00000000..a8106393 --- /dev/null +++ b/api/v1beta1/AnsibleTests_types.go @@ -0,0 +1,284 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +type extraConfigmapsMounts struct { + // +operator-sdk:csv:customresourcedefinitions:type=spec + // +kubebuilder:validation:Required + // The name of an existing config map for mounting. 
+	Name string `json:"Name"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// Path within the container at which the volume should be mounted.
+	MountPath string `json:"mountPath"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=""
+	// Config map subpath for mounting, defaults to configmap root.
+	SubPath string `json:"subPath"`
+}
+
+// AnsibleTestsSpec defines the desired state of AnsibleTests
+type AnsibleTestsSpec struct {
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// Extra configmaps for mounting in the pod.
+	ExtraMounts []extraConfigmapsMounts `json:"extraMounts"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default="local-storage"
+	// StorageClass used to create PVCs that store the logs
+	StorageClass string `json:"storageClass"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default="dataplane-ansible-ssh-private-key-secret"
+	// ComputeSSHKeySecretName is the name of the k8s secret that contains an ssh key for computes.
+	// The key is mounted to ~/.ssh/id_ecdsa in the ansible pod
+	ComputesSSHKeySecretName string `json:"computeSSHKeySecretName"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=""
+	// WorkloadSSHKeySecretName is the name of the k8s secret that contains an ssh key for the ansible workload.
+	// The key is mounted to ~/test_keypair.key in the ansible pod
+	WorkloadSSHKeySecretName string `json:"workloadSSHKeySecretName"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=""
+	// AnsibleGitRepo - git repo to clone into container
+	AnsibleGitRepo string `json:"ansibleGitRepo,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=""
+	// AnsiblePlaybookPath - path to ansible playbook
+	AnsiblePlaybookPath string `json:"ansiblePlaybookPath,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=""
+	// AnsibleCollections - extra ansible collections to install in addition to the ones that already exist in requirements.yaml
+	AnsibleCollections string `json:"ansibleCollections,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=""
+	// AnsibleVarFiles - interface to create ansible var files. Those get added to the
+	// service config dir in /etc/test_operator/ and passed to the ansible command using -e @/etc/test_operator/
+	AnsibleVarFiles string `json:"ansibleVarFiles,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=""
+	// AnsibleExtraVars - string to pass parameters to ansible using -e
+	AnsibleExtraVars string `json:"ansibleExtraVars,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=""
+	// AnsibleInventory - string that contains the inventory file content
+	AnsibleInventory string `json:"ansibleInventory,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=openstack-config
+	// OpenStackConfigMap is the name of the ConfigMap containing the clouds.yaml
+	OpenStackConfigMap string `json:"openStackConfigMap"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=openstack-config-secret
+	// OpenStackConfigSecret is the name of the Secret containing the secure.yaml
+	OpenStackConfigSecret string `json:"openStackConfigSecret"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=false
+	// Run ansible playbook with -vvvv
+	Debug bool `json:"debug,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:="quay.io/podified-antelope-centos9/openstack-ansible-tests:current-podified"
+	// Container image for AnsibleTests
+	ContainerImage string `json:"containerImage,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// BackoffLimit allows defining the maximum number of retried executions (defaults to 6).
+	// +kubebuilder:default:=0
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:number"}
+	BackoffLimit *int32 `json:"backoffLimit,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// A parameter that contains a workflow definition.
+	// +kubebuilder:validation:Optional
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:number"}
+	Workflow []AnsibleTestsWorkflowSpec `json:"workflow,omitempty"`
+}
+
+// AnsibleTestsWorkflowSpec defines the desired state of a single workflow step
+type AnsibleTestsWorkflowSpec struct {
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// Extra configmaps for mounting in the pod
+	ExtraMounts []extraConfigmapsMounts `json:"extraMounts"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Required
+	// Name of a workflow step. The step name will be used for example to create
+	// a logs directory.
+	StepName string `json:"stepName"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// StorageClass used to create PVCs that store the logs
+	StorageClass *string `json:"storageClass"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// ComputeSSHKeySecretName is the name of the k8s secret that contains an ssh key for computes.
+	// The key is mounted to ~/.ssh/id_ecdsa in the ansible pod
+	ComputesSSHKeySecretName string `json:"computeSSHKeySecretName"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// WorkloadSSHKeySecretName is the name of the k8s secret that contains an ssh key for the ansible workload.
+	// The key is mounted to ~/test_keypair.key in the ansible pod
+	WorkloadSSHKeySecretName string `json:"workloadSSHKeySecretName"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// AnsibleGitRepo - git repo to clone into container
+	AnsibleGitRepo string `json:"ansibleGitRepo,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// AnsiblePlaybookPath - path to ansible playbook
+	AnsiblePlaybookPath string `json:"ansiblePlaybookPath,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// AnsibleCollections - extra ansible collections to install in addition to the ones that already exist in requirements.yaml
+	AnsibleCollections string `json:"ansibleCollections,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// AnsibleVarFiles - interface to create ansible var files. Those get added to the
+	// service config dir in /etc/test_operator/ and passed to the ansible command using -e @/etc/test_operator/
+	AnsibleVarFiles string `json:"ansibleVarFiles,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// AnsibleExtraVars - interface to pass parameters to ansible using -e
+	AnsibleExtraVars string `json:"ansibleExtraVars,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// AnsibleInventory - string that contains the inventory file content
+	AnsibleInventory string `json:"ansibleInventory,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// OpenStackConfigMap is the name of the ConfigMap containing the clouds.yaml
+	OpenStackConfigMap *string `json:"openStackConfigMap"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// OpenStackConfigSecret is the name of the Secret containing the secure.yaml
+	OpenStackConfigSecret *string `json:"openStackConfigSecret"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// Run ansible playbook with -vvvv
+	Debug bool `json:"debug,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// +kubebuilder:validation:Optional
+	// Container image for AnsibleTests
+	ContainerImage string `json:"containerImage,omitempty"`
+
+	// +operator-sdk:csv:customresourcedefinitions:type=spec
+	// BackoffLimit allows defining the maximum number of retried executions (defaults to 6).
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:number"}
+	BackoffLimit *int32 `json:"backoffLimit,omitempty"`
+}
+
+// AnsibleTestsStatus defines the observed state of AnsibleTests
+type AnsibleTestsStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	// Map of hashes to track e.g.
job status + Hash map[string]string `json:"hash,omitempty"` + + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + // NetworkAttachments status of the deployment pods + NetworkAttachments map[string][]string `json:"networkAttachments,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// AnsibleTestsStatus is the Schema for the AnsibleTestsStatus API +type AnsibleTests struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AnsibleTestsSpec `json:"spec,omitempty"` + Status AnsibleTestsStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AnsibleTestsList contains a list of AnsibleTests +type AnsibleTestsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AnsibleTests `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AnsibleTests{}, &AnsibleTestsList{}) +} + +// RbacConditionsSet - set the conditions for the rbac object +func (instance AnsibleTests) RbacConditionsSet(c *condition.Condition) { + instance.Status.Conditions.Set(c) +} + +// RbacNamespace - return the namespace +func (instance AnsibleTests) RbacNamespace() string { + return instance.Namespace +} + +// RbacResourceName - return the name to be used for rbac objects (serviceaccount, role, rolebinding) +func (instance AnsibleTests) RbacResourceName() string { + return instance.Name +} diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 745897c6..be3cde1d 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,181 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnsibleTests) DeepCopyInto(out *AnsibleTests) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnsibleTests. +func (in *AnsibleTests) DeepCopy() *AnsibleTests { + if in == nil { + return nil + } + out := new(AnsibleTests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AnsibleTests) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnsibleTestsList) DeepCopyInto(out *AnsibleTestsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AnsibleTests, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnsibleTestsList. +func (in *AnsibleTestsList) DeepCopy() *AnsibleTestsList { + if in == nil { + return nil + } + out := new(AnsibleTestsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AnsibleTestsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnsibleTestsSpec) DeepCopyInto(out *AnsibleTestsSpec) { + *out = *in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]extraConfigmapsMounts, len(*in)) + copy(*out, *in) + } + if in.BackoffLimit != nil { + in, out := &in.BackoffLimit, &out.BackoffLimit + *out = new(int32) + **out = **in + } + if in.Workflow != nil { + in, out := &in.Workflow, &out.Workflow + *out = make([]AnsibleTestsWorkflowSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnsibleTestsSpec. +func (in *AnsibleTestsSpec) DeepCopy() *AnsibleTestsSpec { + if in == nil { + return nil + } + out := new(AnsibleTestsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnsibleTestsStatus) DeepCopyInto(out *AnsibleTestsStatus) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnsibleTestsStatus. +func (in *AnsibleTestsStatus) DeepCopy() *AnsibleTestsStatus { + if in == nil { + return nil + } + out := new(AnsibleTestsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnsibleTestsWorkflowSpec) DeepCopyInto(out *AnsibleTestsWorkflowSpec) { + *out = *in + if in.ExtraMounts != nil { + in, out := &in.ExtraMounts, &out.ExtraMounts + *out = make([]extraConfigmapsMounts, len(*in)) + copy(*out, *in) + } + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } + if in.OpenStackConfigMap != nil { + in, out := &in.OpenStackConfigMap, &out.OpenStackConfigMap + *out = new(string) + **out = **in + } + if in.OpenStackConfigSecret != nil { + in, out := &in.OpenStackConfigSecret, &out.OpenStackConfigSecret + *out = new(string) + **out = **in + } + if in.BackoffLimit != nil { + in, out := &in.BackoffLimit, &out.BackoffLimit + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnsibleTestsWorkflowSpec. +func (in *AnsibleTestsWorkflowSpec) DeepCopy() *AnsibleTestsWorkflowSpec { + if in == nil { + return nil + } + out := new(AnsibleTestsWorkflowSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExternalPluginType) DeepCopyInto(out *ExternalPluginType) { *out = *in diff --git a/config/crd/bases/test.openstack.org_ansibletests.yaml b/config/crd/bases/test.openstack.org_ansibletests.yaml new file mode 100644 index 00000000..5fb0c8c3 --- /dev/null +++ b/config/crd/bases/test.openstack.org_ansibletests.yaml @@ -0,0 +1,288 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.1 + creationTimestamp: null + name: ansibletests.test.openstack.org +spec: + group: test.openstack.org + names: + kind: AnsibleTests + listKind: AnsibleTestsList + plural: ansibletests + singular: ansibletests + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AnsibleTestsStatus is the Schema for the AnsibleTestsStatus API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AnsibleTestsSpec defines the desired state of AnsibleTests + properties: + ansibleCollections: + default: "" + description: AnsibleCollections - extra ansible collections to instal + in additionn to the ones exist in the requirements.yaml + type: string + ansibleExtraVars: + default: "" + description: AnsibleExtraVars - string to pass parameters to ansible + using + type: string + ansibleGitRepo: + default: "" + description: AnsibleGitRepo - git repo to clone into container + type: string + ansibleInventory: + default: "" + description: AnsibleInventory - string that contains the inventory + file content + type: string + ansiblePlaybookPath: + default: "" + description: AnsiblePlaybookPath - path to ansible playbook + type: string + ansibleVarFiles: + default: "" + description: AnsibleVarFiles - interface to create ansible var files + Those get added to the + type: string + backoffLimit: + default: 0 + description: BackoffLimimt allows to define the maximum number of + retried executions (defaults to 6). + format: int32 + type: integer + computeSSHKeySecretName: + default: dataplane-ansible-ssh-private-key-secret + description: ComputeSSHKeySecretName is the name of the k8s secret + that contains an ssh key for computes. The key is mounted to ~/.ssh/id_ecdsa + in the ansible pod + type: string + containerImage: + default: quay.io/podified-antelope-centos9/openstack-ansible-tests:current-podified + description: Container image for AnsibleTests + type: string + debug: + default: false + description: Run ansible playbook with -vvvv + type: boolean + extraMounts: + description: Extra configmaps for mounting in the pod. + items: + properties: + Name: + description: The name of an existing config map for mounting. + type: string + mountPath: + description: Path within the container at which the volume should + be mounted. 
+ type: string + subPath: + default: "" + description: Config map subpath for mounting, defaults to configmap + root. + type: string + required: + - Name + - mountPath + - subPath + type: object + type: array + openStackConfigMap: + default: openstack-config + description: OpenStackConfigMap is the name of the ConfigMap containing + the clouds.yaml + type: string + openStackConfigSecret: + default: openstack-config-secret + description: OpenStackConfigSecret is the name of the Secret containing + the secure.yaml + type: string + storageClass: + default: local-storage + description: StorageClass used to create PVCs that store the logs + type: string + workflow: + description: A parameter that contains a workflow definition. + items: + properties: + ansibleCollections: + description: AnsibleCollections - extra ansible collections + to instal in additionn to the ones exist in the requirements.yaml + type: string + ansibleExtraVars: + description: AnsibleExtraVars - interface to pass parameters + to ansible using -e + type: string + ansibleGitRepo: + description: AnsibleGitRepo - git repo to clone into container + type: string + ansibleInventory: + description: AnsibleInventory - string that contains the inventory + file content + type: string + ansiblePlaybookPath: + description: AnsiblePlaybookPath - path to ansible playbook + type: string + ansibleVarFiles: + description: AnsibleVarFiles - interface to create ansible var + files Those get added to the service config dir in /etc/test_operator/ + and passed to the ansible command using -e @/etc/test_operator/ + type: string + backoffLimit: + description: BackoffLimimt allows to define the maximum number + of retried executions (defaults to 6). + format: int32 + type: integer + computeSSHKeySecretName: + description: ComputeSSHKeySecretName is the name of the k8s + secret that contains an ssh key for computes. The key is mounted + to ~/.ssh/id_ecdsa in the ansible pod + type: string + containerImage: + description: Container image for AnsibleTests + type: string + debug: + description: Run ansible playbook with -vvvv + type: boolean + extraMounts: + description: Extra configmaps for mounting in the pod + items: + properties: + Name: + description: The name of an existing config map for mounting. + type: string + mountPath: + description: Path within the container at which the volume + should be mounted. + type: string + subPath: + default: "" + description: Config map subpath for mounting, defaults + to configmap root. + type: string + required: + - Name + - mountPath + - subPath + type: object + type: array + openStackConfigMap: + description: OpenStackConfigMap is the name of the ConfigMap + containing the clouds.yaml + type: string + openStackConfigSecret: + description: OpenStackConfigSecret is the name of the Secret + containing the secure.yaml + type: string + stepName: + description: Name of a workflow step. The step name will be + used for example to create a logs directory. + type: string + storageClass: + description: StorageClass used to create PVCs that store the + logs + type: string + workloadSSHKeySecretName: + description: WorkloadSSHKeySecretName is the name of the k8s + secret that contains an ssh key for the ansible workload. + The key is mounted to ~/test_keypair.key in the ansible pod + type: string + required: + - stepName + type: object + type: array + workloadSSHKeySecretName: + default: "" + description: WorkloadSSHKeySecretName is the name of the k8s secret + that contains an ssh key for the ansible workload. 
The key is mounted + to ~/test_keypair.key in the ansible pod + type: string + required: + - computeSSHKeySecretName + - openStackConfigMap + - openStackConfigSecret + - storageClass + - workloadSSHKeySecretName + type: object + status: + description: AnsibleTestsStatus defines the observed state of AnsibleTests + properties: + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: Severity provides a classification of Reason code, + so the current situation is immediately understandable and + could act accordingly. It is meant for situations where Status=False + and it should be indicated if it is just informational, warning + (next reconciliation might fix it) or an error (e.g. DB create + issue and no actions to automatically resolve the issue can/should + be done). For conditions where Status=Unknown or Status=True + the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 91583909..8da26003 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,7 @@ resources: - bases/test.openstack.org_tempests.yaml - bases/test.openstack.org_tobikoes.yaml - bases/test.openstack.org_horizontests.yaml +- bases/test.openstack.org_ansibletests.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -13,6 +14,7 @@ patchesStrategicMerge: #- patches/webhook_in_tempests.yaml #- patches/webhook_in_tobikoes.yaml #- patches/webhook_in_horizontests.yaml +#- patches/webhook_in_ansible_tests.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. @@ -20,6 +22,7 @@ patchesStrategicMerge: #- patches/cainjection_in_tempests.yaml #- patches/cainjection_in_tobikoes.yaml #- patches/cainjection_in_horizontests.yaml +#- patches/cainjection_in_ansible_tests.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/crd/patches/cainjection_in_ansibleTests.yaml b/config/crd/patches/cainjection_in_ansibleTests.yaml new file mode 100644 index 00000000..6ac0c12b --- /dev/null +++ b/config/crd/patches/cainjection_in_ansibleTests.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: ansibleTest.test.openstack.org diff --git a/config/crd/patches/webhook_in_ansibleTests.yaml b/config/crd/patches/webhook_in_ansibleTests.yaml new file mode 100644 index 00000000..b53575af --- /dev/null +++ b/config/crd/patches/webhook_in_ansibleTests.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ansibleTest.test.openstack.org +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 9bded114..f2ce75a6 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -4,4 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/openstack-k8s-operators/test-operator-index + newName: quay.io/openstack-k8s-operators/test-operator + newTag: latest diff --git a/config/rbac/ansibleTests_editor_role.yaml b/config/rbac/ansibleTests_editor_role.yaml new file mode 100644 index 00000000..abeffdb0 --- /dev/null +++ b/config/rbac/ansibleTests_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit ansibleTest. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: ansibleTests-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: test-operator + app.kubernetes.io/part-of: test-operator + app.kubernetes.io/managed-by: kustomize + name: ansibleTests-editor-role +rules: +- apiGroups: + - test.openstack.org + resources: + - ansibleTests + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - test.openstack.org + resources: + - ansibleTests/status + verbs: + - get diff --git a/config/rbac/ansibleTests_viewer_role.yaml b/config/rbac/ansibleTests_viewer_role.yaml new file mode 100644 index 00000000..a1f3f115 --- /dev/null +++ b/config/rbac/ansibleTests_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view ansibleTest. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: ansibleTests-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: test-operator + app.kubernetes.io/part-of: test-operator + app.kubernetes.io/managed-by: kustomize + name: ansibleTests-viewer-role +rules: +- apiGroups: + - test.openstack.org + resources: + - ansibleTests + verbs: + - get + - list + - watch +- apiGroups: + - test.openstack.org + resources: + - ansibleTests/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 1a7b704c..b81875d9 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -118,6 +118,32 @@ rules: - securitycontextconstraints verbs: - use +- apiGroups: + - test.openstack.org + resources: + - ansibleTests + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - test.openstack.org + resources: + - ansibleTests/finalizers + verbs: + - update +- apiGroups: + - test.openstack.org + resources: + - ansibleTests/status + verbs: + - get + - patch + - update - apiGroups: - test.openstack.org resources: diff --git a/config/samples/test_v1beta1_ansibletests.yaml b/config/samples/test_v1beta1_ansibletests.yaml new file mode 100644 index 00000000..4c26b706 --- /dev/null +++ b/config/samples/test_v1beta1_ansibletests.yaml @@ -0,0 +1,53 @@ +--- +apiVersion: test.openstack.org/v1beta1 +kind: AnsibleTests +metadata: + name: performance-tests + namespace: openstack +spec: + workloadSSHKeySecretName: 'open-tofu-ssh-keys' + debug: true + stepName: trex + ansiblePlaybookPath: playbooks/packet_gen/trex/performance_scenario.yml + #ansibleCollections: + containerImage: quay.io/rhos-dfg-nfv/test_performance:latest + ansibleGitRepo: https://github.com/eshulman2/ansible-nfv.git + ansibleInventory: | + localhost ansible_connection=local ansible_python_interpreter=python3 + undercloud-0 ansible_connection=local ansible_python_interpreter=python3 + compute-0 ansible_host=192.168.122.100 ansible_user=cloud-admin ansible_ssh_private_key_file=/var/lib/perf/.ssh/compute_id + compute-1 ansible_host=192.168.122.101 ansible_user=cloud-admin ansible_ssh_private_key_file=/var/lib/perf/.ssh/compute_id + [compute] + compute-0 + compute-1 + [local] + localhost + [undercloud] + undercloud-0 + # ansibleExtraVars: ' -e manual_run=false -e binary_perf_log=/tmp/dpdk_performance.log -e dut_group=dpdk_dut -e dut_type=dpdk -e testpmd_lcores=3,4,5 -e trex_rate=2 -e emc_insert_inv_prob=100 -e clone_traffic_gen_repo=false -e private_key_fetch_location=~/ -e python_interperter=/usr/bin/python3 ' + # ansibleVarFiles: | + # --- + # # Use exist cloud resources + # cloud_resources: external + # # DUT compute + # dut_compute: compute-1 + # # Trex config + # trex_lcores: "2-11" + # # testpmd path (for rhel 8.4 vm) + # testpmd_bin: "/root/dpdk/build/app/dpdk-testpmd" + # # Disable search rates higher than the specified + # binary_search_disable_upward_search: true + # # Used to balance queues + # binary_search_warmup_trial_runtime: 120 + # discover_instance_external_ip: true + # ssh_key: test_keypair.key + # dynamic_instances: + # - name: trex + # group: trex + # user: cloud-user + # - name: testpmd-dpdk-dut + # group: dpdk_dut + # user: cloud-user + # - name: testpmd-sriov-vf-dut + # group: sriov_dut + # user: cloud-user \ No newline at end of file diff --git a/controllers/ansibleTest_controller.go b/controllers/ansibleTest_controller.go new file mode 100644 index 
00000000..9a73866d --- /dev/null +++ b/controllers/ansibleTest_controller.go @@ -0,0 +1,341 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "strconv" + "time" + + "reflect" + + "github.com/openstack-k8s-operators/lib-common/modules/common" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/job" + common_rbac "github.com/openstack-k8s-operators/lib-common/modules/common/rbac" + "github.com/openstack-k8s-operators/test-operator/api/v1beta1" + testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" + "github.com/openstack-k8s-operators/test-operator/pkg/ansibleTests" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type AnsibleTestsReconciler struct { + Reconciler +} + +// +kubebuilder:rbac:groups=test.openstack.org,resources=ansibleTests,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=test.openstack.org,resources=ansibleTests/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=test.openstack.org,resources=ansibleTests/finalizers,verbs=update +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete; +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete; +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list; +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;patch;update;delete; +// +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch + +// service account, role, rolebinding +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update +// service account permissions that are needed to grant permission to the above +// +kubebuilder:rbac:groups="security.openshift.io",resourceNames=anyuid;privileged,resources=securitycontextconstraints,verbs=use +// +kubebuilder:rbac:groups="",resources=pods,verbs=create;delete;get;list;patch;update;watch +// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;create;update;watch;patch + +// Reconcile - AnsibleTestsReconciler +func (r *AnsibleTestsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + + // How much time should we wait before calling Reconcile loop when there is a failure + requeueAfter := time.Second 
* 60 + + // Fetch the ansible instance + instance := &testv1beta1.AnsibleTests{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + workflowActive := false + if len(instance.Spec.Workflow) > 0 { + workflowActive = true + } + + // Create a helper + helper, err := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + r.Log, + ) + if err != nil { + return ctrl.Result{}, err + } + + // Ensure that there is an external counter and read its value + // We use the external counter to keep track of the workflow steps + r.WorkflowStepCounterCreate(ctx, instance, helper) + externalWorkflowCounter := r.WorkflowStepCounterRead(ctx, instance, helper) + if externalWorkflowCounter == -1 { + return ctrl.Result{RequeueAfter: requeueAfter}, nil + } + + // Each job that is being executed by the test operator has + currentWorkflowStep := 0 + runningAnsibleJob := &batchv1.Job{} + runningJobName := r.GetJobName(instance, externalWorkflowCounter-1) + err = r.Client.Get(ctx, client.ObjectKey{Namespace: instance.GetNamespace(), Name: runningJobName}, runningAnsibleJob) + if err == nil { + currentWorkflowStep, err = strconv.Atoi(runningAnsibleJob.Labels["workflowStep"]) + } + + logging := log.FromContext(ctx) + if r.CompletedJobExists(ctx, instance, currentWorkflowStep) { + // The job created by the instance was completed. Release the lock + // so that other instances can spawn a job. + logging.Info("Job completed") + r.ReleaseLock(ctx, instance) + } + + // Service account, role, binding + rbacRules := []rbacv1.PolicyRule{ + { + APIGroups: []string{"security.openshift.io"}, + ResourceNames: []string{"anyuid", "privileged"}, + Resources: []string{"securitycontextconstraints"}, + Verbs: []string{"use"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"create", "get", "list", "watch", "update", "patch", "delete"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"persistentvolumeclaims"}, + Verbs: []string{"get", "list", "create", "update", "watch", "patch"}, + }, + } + rbacResult, err := common_rbac.ReconcileRbac(ctx, helper, instance, rbacRules) + if err != nil { + return rbacResult, err + } else if (rbacResult != ctrl.Result{}) { + return rbacResult, nil + } + + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + // Service account, role, binding - end + + serviceLabels := map[string]string{ + common.AppSelector: ansibleTests.ServiceName, + "workflowStep": strconv.Itoa(externalWorkflowCounter), + "instanceName": instance.Name, + "operator": "test-operator", + } + + // Create PersistentVolumeClaim + ctrlResult, err := r.EnsureLogsPVCExists( + ctx, + instance, + helper, + serviceLabels, + instance.Spec.StorageClass, + false, + ) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + // Create PersistentVolumeClaim - end + + // If the current job is executing the last workflow step -> do not create another job + if workflowActive && externalWorkflowCounter >= len(instance.Spec.Workflow) { + return ctrl.Result{}, nil + } else if !workflowActive && r.JobExists(ctx, instance, currentWorkflowStep) { + return ctrl.Result{}, nil + } + + // We are about to start job that spawns the pod with tests. + // This lock ensures that there is always only one pod running. 
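+	// If another instance already holds the lock, do not spawn a second pod;
+	// requeue and try again later, once the currently running job completes
+	// and the lock is released (see ReleaseLock above).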
+ if !r.AcquireLock(ctx, instance, helper, false) { + logging.Info("Can not acquire lock") + requeueAfter := time.Second * 60 + return ctrl.Result{RequeueAfter: requeueAfter}, nil + } else { + logging.Info("Lock acquired") + } + + if workflowActive { + r.WorkflowStepCounterIncrease(ctx, instance, helper) + } + + instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) + + // Create a new job + mountCerts := r.CheckSecretExists(ctx, instance, "combined-ca-bundle") + jobName := r.GetJobName(instance, externalWorkflowCounter) + envVars, workflowOverrideParams := r.PrepareAnsibleEnv(ctx, serviceLabels, instance, helper, externalWorkflowCounter) + logsPVCName := r.GetPVCLogsName(instance) + jobDef := ansibleTests.Job( + instance, + serviceLabels, + jobName, + logsPVCName, + mountCerts, + envVars, + workflowOverrideParams, + externalWorkflowCounter, + ) + ansibleTestsJob := job.NewJob( + jobDef, + testv1beta1.ConfigHash, + true, + time.Duration(5)*time.Second, + "", + ) + + ctrlResult, err = ansibleTestsJob.DoJob(ctx, helper) + if err != nil { + // Creation of the ansibleTests job was not successfull. + // Release the lock and allow other controllers to spawn + // a job. + r.ReleaseLock(ctx, instance) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DeploymentReadyErrorMessage, + err.Error())) + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyRunningMessage)) + return ctrlResult, nil + } + // Create a new job - end + + r.Log.Info("Reconciled Service successfully") + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AnsibleTestsReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&testv1beta1.AnsibleTests{}). + Owns(&batchv1.Job{}). + Owns(&corev1.Secret{}). + Owns(&corev1.ConfigMap{}). + Complete(r) +} + +func (r *Reconciler) OverwriteAnsibleWithWorkflow( + ctx context.Context, + instance v1beta1.AnsibleTestsSpec, + sectionName string, + workflowValueType string, + workflowStepNum int, +) interface{} { + if len(instance.Workflow)-1 < workflowStepNum { + reflected := reflect.ValueOf(instance) + fieldValue := reflected.FieldByName(sectionName) + return fieldValue.Interface() + } + + reflected := reflect.ValueOf(instance) + SpecValue := reflected.FieldByName(sectionName).Interface() + + reflected = reflect.ValueOf(instance.Workflow[workflowStepNum]) + WorkflowValue := reflected.FieldByName(sectionName).Interface() + + if workflowValueType == "pbool" { + if val, ok := WorkflowValue.(*bool); ok && val != nil { + return *(WorkflowValue.(*bool)) + } + return SpecValue.(bool) + } else if workflowValueType == "puint8" { + if val, ok := WorkflowValue.(*uint8); ok && val != nil { + return *(WorkflowValue.(*uint8)) + } + return SpecValue + } else if workflowValueType == "string" { + if val, ok := WorkflowValue.(string); ok && val != "" { + return WorkflowValue + } + return SpecValue + } + + return nil +} + +// This function prepares env variables for a single workflow step. 
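+// Values set in the corresponding Spec.Workflow[step] entry take precedence over
+// the top-level Spec fields when set (resolved via OverwriteAnsibleWithWorkflow
+// above) and are handed to the test pod as POD_* environment variables, for
+// example POD_ANSIBLE_PLAYBOOK, POD_ANSIBLE_INVENTORY and POD_INSTALL_COLLECTIONS.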
+func (r *AnsibleTestsReconciler) PrepareAnsibleEnv( + ctx context.Context, + labels map[string]string, + instance *testv1beta1.AnsibleTests, + helper *helper.Helper, + step int, +) (map[string]env.Setter, map[string]string) { + // Prepare env vars + envVars := make(map[string]env.Setter) + workflowOverrideParams := make(map[string]string) + + // volumes workflow override + workflowOverrideParams["WorkloadSSHKeySecretName"] = r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "WorkloadSSHKeySecretName", "string", step).(string) + workflowOverrideParams["ComputesSSHKeySecretName"] = r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "ComputesSSHKeySecretName", "string", step).(string) + workflowOverrideParams["ContainerImage"] = r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "ContainerImage", "string", step).(string) + + // bool + debug := r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "Debug", "pbool", step).(bool) + if debug { + envVars["POD_DEBUG"] = env.SetValue("true") + } + + // strings + extraVars := r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "AnsibleExtraVars", "string", step).(string) + envVars["POD_ANSIBLE_EXTRA_VARS"] = env.SetValue(extraVars) + + extraVarsFile := r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "AnsibleVarFiles", "string", step).(string) + envVars["POD_ANSIBLE_FILE_EXTRA_VARS"] = env.SetValue(extraVarsFile) + + inventory := r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "AnsibleInventory", "string", step).(string) + envVars["POD_ANSIBLE_INVENTORY"] = env.SetValue(inventory) + + gitRepo := r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "AnsibleGitRepo", "string", step).(string) + envVars["POD_ANSIBLE_GIT_REPO"] = env.SetValue(gitRepo) + + playbookPath := r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "AnsiblePlaybookPath", "string", step).(string) + envVars["POD_ANSIBLE_PLAYBOOK"] = env.SetValue(playbookPath) + + ansibleCollections := r.OverwriteAnsibleWithWorkflow(ctx, instance.Spec, "AnsibleCollections", "string", step).(string) + envVars["POD_INSTALL_COLLECTIONS"] = env.SetValue(ansibleCollections) + + return envVars, workflowOverrideParams +} diff --git a/controllers/common.go b/controllers/common.go index 94e51859..6775e3be 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -78,7 +78,14 @@ func (r *Reconciler) GetJobName(instance interface{}, workflowStepNum int) strin return typedInstance.Name + "-" + workflowStepName + jobNameStepInfix + strconv.Itoa(workflowStepNum) } } else if typedInstance, ok := instance.(*v1beta1.HorizonTest); ok { - return typedInstance.Name + return typedInstance.Name + } else if typedInstance, ok := instance.(*v1beta1.AnsibleTests); ok { + if len(typedInstance.Spec.Workflow) == 0 || workflowStepNum == workflowStepNumInvalid { + return typedInstance.Name + } else { + workflowStepName := typedInstance.Spec.Workflow[workflowStepNum].StepName + return typedInstance.Name + "-" + workflowStepName + jobNameStepInfix + strconv.Itoa(workflowStepNum) + } } else { return "" } diff --git a/main.go b/main.go index 9ac8e0ab..6b359673 100644 --- a/main.go +++ b/main.go @@ -106,6 +106,14 @@ func main() { os.Exit(1) } + ansibleReconciler := &controllers.AnsibleTestsReconciler{} + ansibleReconciler.Client = mgr.GetClient() + ansibleReconciler.Scheme = mgr.GetScheme() + if err = ansibleReconciler.SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AnsibleTests") + os.Exit(1) + } + // Setup webhooks if requested if 
 	if strings.ToLower(os.Getenv("ENABLE_WEBHOOKS")) != "false" {
 		if err = (&testv1beta1.Tempest{}).SetupWebhookWithManager(mgr); err != nil {
diff --git a/pkg/ansibleTests/const.go b/pkg/ansibleTests/const.go
new file mode 100644
index 00000000..4c983903
--- /dev/null
+++ b/pkg/ansibleTests/const.go
@@ -0,0 +1,6 @@
+package ansibleTests
+
+const (
+	// ServiceName - ansibleTests service name
+	ServiceName = "ansibleTests"
+)
diff --git a/pkg/ansibleTests/job.go b/pkg/ansibleTests/job.go
new file mode 100644
index 00000000..8bf919e9
--- /dev/null
+++ b/pkg/ansibleTests/job.go
@@ -0,0 +1,78 @@
+package ansibleTests
+
+import (
+	"github.com/openstack-k8s-operators/lib-common/modules/common/env"
+
+	testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Job - prepares the job that runs the AnsibleTests test suite
+func Job(
+	instance *testv1beta1.AnsibleTests,
+	labels map[string]string,
+	jobName string,
+	logsPVCName string,
+	mountCerts bool,
+	envVars map[string]env.Setter,
+	workflowOverrideParams map[string]string,
+	externalWorkflowCounter int,
+) *batchv1.Job {
+
+	runAsUser := int64(227)
+	runAsGroup := int64(227)
+	parallelism := int32(1)
+	completions := int32(1)
+
+	job := &batchv1.Job{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      jobName,
+			Namespace: instance.Namespace,
+			Labels:    labels,
+		},
+		Spec: batchv1.JobSpec{
+			Parallelism:  &parallelism,
+			Completions:  &completions,
+			BackoffLimit: instance.Spec.BackoffLimit,
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: labels,
+				},
+				Spec: corev1.PodSpec{
+					RestartPolicy:      corev1.RestartPolicyNever,
+					ServiceAccountName: instance.RbacResourceName(),
+					SecurityContext: &corev1.PodSecurityContext{
+						RunAsUser:  &runAsUser,
+						RunAsGroup: &runAsGroup,
+						FSGroup:    &runAsGroup,
+					},
+					Containers: []corev1.Container{
+						{
+							Name:         instance.Name,
+							Image:        workflowOverrideParams["ContainerImage"],
+							Args:         []string{},
+							Env:          env.MergeEnvs([]corev1.EnvVar{}, envVars),
+							VolumeMounts: GetVolumeMounts(mountCerts, instance, externalWorkflowCounter),
+							SecurityContext: &corev1.SecurityContext{
+								Capabilities: &corev1.Capabilities{
+									Add: []corev1.Capability{"NET_ADMIN", "NET_RAW", "CAP_AUDIT_WRITE"},
+								},
+							},
+						},
+					},
+					Volumes: GetVolumes(
+						instance,
+						logsPVCName,
+						mountCerts,
+						workflowOverrideParams,
+						externalWorkflowCounter,
+					),
+				},
+			},
+		},
+	}
+
+	return job
+}
diff --git a/pkg/ansibleTests/volumes.go b/pkg/ansibleTests/volumes.go
new file mode 100644
index 00000000..b6113302
--- /dev/null
+++ b/pkg/ansibleTests/volumes.go
@@ -0,0 +1,245 @@
+package ansibleTests
+
+import (
+	testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// GetVolumes - returns the volumes mounted into the AnsibleTests job pod
+func GetVolumes(
+	instance *testv1beta1.AnsibleTests,
+	logsPVCName string,
+	mountCerts bool,
+	workflowOverrideParams map[string]string,
+	externalWorkflowCounter int,
+) []corev1.Volume {
+
+	var scriptsVolumeConfidentialMode int32 = 0420
+	var tlsCertificateMode int32 = 0444
+	var privateKeyMode int32 = 0600
+	var publicInfoMode int32 = 0744
+
+	//source_type := corev1.HostPathDirectoryOrCreate
+	volumes := []corev1.Volume{
+		{
+			Name: "etc-machine-id",
+			VolumeSource: corev1.VolumeSource{
+				HostPath: &corev1.HostPathVolumeSource{
+					Path: "/etc/machine-id",
+				},
+			},
+		},
+		{
+			Name: "etc-localtime",
+			VolumeSource: corev1.VolumeSource{
+				HostPath: &corev1.HostPathVolumeSource{
+					Path: "/etc/localtime",
+				},
+			},
+		},
+		{
+			Name: "openstack-config",
+			VolumeSource: corev1.VolumeSource{
+				ConfigMap: &corev1.ConfigMapVolumeSource{
+					DefaultMode: &scriptsVolumeConfidentialMode,
+					LocalObjectReference: corev1.LocalObjectReference{
+						Name: "openstack-config",
+					},
+				},
+			},
+		},
+		{
+			Name: "openstack-config-secret",
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{
+					DefaultMode: &tlsCertificateMode,
+					SecretName:  "openstack-config-secret",
+				},
+			},
+		},
+		{
+			Name: "test-operator-logs",
+			VolumeSource: corev1.VolumeSource{
+				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+					ClaimName: logsPVCName,
+					ReadOnly:  false,
+				},
+			},
+		},
+	}
+
+	if mountCerts {
+		caCertsVolume := corev1.Volume{
+			Name: "ca-certs",
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{
+					DefaultMode: &scriptsVolumeConfidentialMode,
+					SecretName:  "combined-ca-bundle",
+				},
+			},
+		}
+
+		volumes = append(volumes, caCertsVolume)
+	}
+
+	keysVolume := corev1.Volume{
+		Name: "compute-ssh-secret",
+		VolumeSource: corev1.VolumeSource{
+			Secret: &corev1.SecretVolumeSource{
+				SecretName:  workflowOverrideParams["ComputesSSHKeySecretName"],
+				DefaultMode: &privateKeyMode,
+			},
+		},
+	}
+
+	volumes = append(volumes, keysVolume)
+
+	keysVolume = corev1.Volume{
+		Name: "workload-ssh-secret",
+		VolumeSource: corev1.VolumeSource{
+			Secret: &corev1.SecretVolumeSource{
+				SecretName:  workflowOverrideParams["WorkloadSSHKeySecretName"],
+				DefaultMode: &privateKeyMode,
+			},
+		},
+	}
+
+	volumes = append(volumes, keysVolume)
+
+	for _, vol := range instance.Spec.ExtraMounts {
+		extraVol := corev1.Volume{
+			Name: vol.Name,
+			VolumeSource: corev1.VolumeSource{
+				ConfigMap: &corev1.ConfigMapVolumeSource{
+					DefaultMode: &publicInfoMode,
+					LocalObjectReference: corev1.LocalObjectReference{
+						Name: vol.Name,
+					},
+				},
+			},
+		}
+
+		volumes = append(volumes, extraVol)
+	}
+
+	for _, vol := range instance.Spec.Workflow[externalWorkflowCounter].ExtraMounts {
+		extraWorkflowVol := corev1.Volume{
+			Name: vol.Name,
+			VolumeSource: corev1.VolumeSource{
+				ConfigMap: &corev1.ConfigMapVolumeSource{
+					DefaultMode: &publicInfoMode,
+					LocalObjectReference: corev1.LocalObjectReference{
+						Name: vol.Name,
+					},
+				},
+			},
+		}
+
+		volumes = append(volumes, extraWorkflowVol)
+	}
+
+	return volumes
+}
+
+// GetVolumeMounts - returns the volume mounts for the AnsibleTests job container
+func GetVolumeMounts(mountCerts bool, instance *testv1beta1.AnsibleTests, externalWorkflowCounter int) []corev1.VolumeMount {
+	volumeMounts := []corev1.VolumeMount{
+		{
+			Name:      "etc-machine-id",
+			MountPath: "/etc/machine-id",
+			ReadOnly:  true,
+		},
+		{
+			Name:      "etc-localtime",
+			MountPath: "/etc/localtime",
+			ReadOnly:  true,
+		},
+		{
+			Name:      "test-operator-logs",
+			MountPath: "/var/lib/AnsibleTests/external_files",
+			ReadOnly:  false,
+		},
+		{
+			Name:      "openstack-config",
+			MountPath: "/etc/openstack/clouds.yaml",
+			SubPath:   "clouds.yaml",
+			ReadOnly:  true,
+		},
+		{
+			Name:      "openstack-config",
+			MountPath: "/var/lib/ansible/.config/openstack/clouds.yaml",
+			SubPath:   "clouds.yaml",
+			ReadOnly:  true,
+		},
+		{
+			Name:      "openstack-config-secret",
+			MountPath: "/var/lib/ansible/.config/openstack/secure.yaml",
+			ReadOnly:  false,
+			SubPath:   "secure.yaml",
+		},
+	}
+
+	if mountCerts {
+		caCertVolumeMount := corev1.VolumeMount{
+			Name:      "ca-certs",
+			MountPath: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
+			ReadOnly:  true,
+			SubPath:   "tls-ca-bundle.pem",
+		}
+
+		volumeMounts = append(volumeMounts, caCertVolumeMount)
+
+		caCertVolumeMount = corev1.VolumeMount{
+			Name:      "ca-certs",
+			MountPath: "/etc/pki/tls/certs/ca-bundle.trust.crt",
+			ReadOnly:  true,
+			SubPath:   "tls-ca-bundle.pem",
+		}
+
+		volumeMounts = append(volumeMounts, caCertVolumeMount)
+	}
+
+	workloadSSHKeyMount := corev1.VolumeMount{
+		Name:      "workload-ssh-secret",
+		MountPath: "/var/lib/ansible/test_keypair.key",
+		SubPath:   "ssh-privatekey",
+		ReadOnly:  true,
+	}
+
+	volumeMounts = append(volumeMounts, workloadSSHKeyMount)
+
+	computeSSHKeyMount := corev1.VolumeMount{
+		Name:      "compute-ssh-secret",
+		MountPath: "/var/lib/ansible/.ssh/compute_id",
+		SubPath:   "ssh-privatekey",
+		ReadOnly:  true,
+	}
+
+	volumeMounts = append(volumeMounts, computeSSHKeyMount)
+
+	for _, vol := range instance.Spec.ExtraMounts {
+
+		extraMounts := corev1.VolumeMount{
+			Name:      vol.Name,
+			MountPath: vol.MountPath,
+			SubPath:   vol.SubPath,
+			ReadOnly:  true,
+		}
+
+		volumeMounts = append(volumeMounts, extraMounts)
+	}
+
+	for _, vol := range instance.Spec.Workflow[externalWorkflowCounter].ExtraMounts {
+
+		extraMounts := corev1.VolumeMount{
+			Name:      vol.Name,
+			MountPath: vol.MountPath,
+			SubPath:   vol.SubPath,
+			ReadOnly:  true,
+		}
+
+		volumeMounts = append(volumeMounts, extraMounts)
+	}
+
+	return volumeMounts
+}