Support union types #77
I think the problem is here: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#volume-v1-core. It is only valid for a `Volume` to set one of its volume sources at a time. The current spec is:

```json
"io.k8s.api.core.v1.Volume": {
"description": "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
"properties": {
"awsElasticBlockStore": {
"$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource",
"description": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore"
},
"azureDisk": {
"$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource",
"description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod."
},
"azureFile": {
"$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource",
"description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod."
},
"cephfs": {
"$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource",
"description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime"
},
"cinder": {
"$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource",
"description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"configMap": {
"$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource",
"description": "ConfigMap represents a configMap that should populate this volume"
},
"csi": {
"$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource",
"description": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature)."
},
"downwardAPI": {
"$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource",
"description": "DownwardAPI represents downward API about the pod that should populate this volume"
},
"emptyDir": {
"$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource",
"description": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir"
},
"fc": {
"$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource",
"description": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod."
},
"flexVolume": {
"$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource",
"description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin."
},
"flocker": {
"$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource",
"description": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running"
},
"gcePersistentDisk": {
"$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource",
"description": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk"
},
"gitRepo": {
"$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource",
"description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container."
},
"glusterfs": {
"$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource",
"description": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md"
},
"hostPath": {
"$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource",
"description": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath"
},
"iscsi": {
"$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource",
"description": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md"
},
"name": {
"description": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
"type": "string"
},
"nfs": {
"$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource",
"description": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs"
},
"persistentVolumeClaim": {
"$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource",
"description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims"
},
"photonPersistentDisk": {
"$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource",
"description": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine"
},
"portworxVolume": {
"$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource",
"description": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine"
},
"projected": {
"$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource",
"description": "Items for all in one resources secrets, configmaps, and downward API"
},
"quobyte": {
"$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource",
"description": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime"
},
"rbd": {
"$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource",
"description": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md"
},
"scaleIO": {
"$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource",
"description": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes."
},
"secret": {
"$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource",
"description": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret"
},
"storageos": {
"$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource",
"description": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes."
},
"vsphereVolume": {
"$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource",
"description": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine"
}
},
"required": [
"name"
],
"type": "object"
},
```

But only one of those keys can be set at the same time. I think we will have to ask upstream to change the OpenAPI spec to properly denote that this is a sum type and not a product type. Then we could change our generator accordingly.
If the spec instead looked something like this:

```yaml
definitions:
  io.k8s.api.core.v1.Volume:
    oneOf:
      - type: object
        properties:
          awsElasticBlockStore:
            $ref: '#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource'
      - type: object
        properties:
          azureDisk:
            $ref: '#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource'
      - type: object
        properties:
          azureFile:
            $ref: '#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource'
      - type: object
        properties:
          cephfs:
            $ref: '#/definitions/io.k8s.api.core.v1.CephFSVolumeSource'
      - type: object
        properties:
          cinder:
            $ref: '#/definitions/io.k8s.api.core.v1.CinderVolumeSource'
      - type: object
        properties:
          configMap:
            $ref: '#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource'
      - type: object
        properties:
          csi:
            $ref: '#/definitions/io.k8s.api.core.v1.CSIVolumeSource'
      - type: object
        properties:
          downwardAPI:
            $ref: '#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource'
      - type: object
        properties:
          emptyDir:
            $ref: '#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource'
      # ...and so on for the remaining volume sources
```
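To make the intended constraint concrete, here is a hedged sketch (the volume names, ConfigMap name, and host path are hypothetical) of what such an encoding is meant to express - exactly one volume source per volume:

```yaml
# Intended to be accepted: exactly one volume source key is set.
- name: config
  configMap:
    name: app-config

# Intended to be rejected: two volume sources on the same volume.
- name: broken
  emptyDir: {}
  hostPath:
    path: /tmp
```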
Upstream issues: kubernetes/kubernetes#51163, kubernetes/kubernetes#79472
@ari-becker for now we could add a special case in the generator for `Volume`.
@arianvp Sure, sounds workable to me |
There's a PR that adds union types to k8s. It doesn't seem like it will be merged soon, but it's useful for us to cross-reference to see which types in the API are actually unions: https://github.com/kubernetes/kubernetes/pull/77370/files
Closing this because: a) the original issue is stale - support went back to omitting null rather than …
The new API presumes that one runs `dhall-to-yaml --omitEmpty`. However, there are areas in the API which expect an empty block to be defined - for example, an `emptyDir` volume - where `--omitEmpty` drops the empty record, so the volume unfortunately renders without its `emptyDir` block instead of with an explicit empty one (a sketch of the two renderings follows).
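A minimal sketch of the two renderings, assuming a volume named `cache` (the name is hypothetical):

```yaml
# What dhall-to-yaml --omitEmpty emits: the empty emptyDir record is
# considered empty and omitted, leaving a volume with no source at all.
volumes:
  - name: cache

# What Kubernetes expects: an explicit empty block marking the volume
# as an emptyDir.
volumes:
  - name: cache
    emptyDir: {}
```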
The current workaround (in this specific case) is to hard-code the default medium (which is the empty string) so that `emptyDir` renders and Kubernetes knows that the volume is an `emptyDir` volume. But it makes me uneasy... I know that there are various areas of the `dhall-kops` API which work the same way.