diff --git a/data/data/install.openshift.io_installconfigs.yaml b/data/data/install.openshift.io_installconfigs.yaml index 1369be1a962..964f87930e3 100644 --- a/data/data/install.openshift.io_installconfigs.yaml +++ b/data/data/install.openshift.io_installconfigs.yaml @@ -57,6 +57,45 @@ spec: - "" - amd64 type: string + diskSetup: + description: |- + DiskSetup stores the type of disks that will be setup with MachineConfigs. + The available types are etcd, swap and user-defined. + items: + description: |- + Disk defines the type of disk (etcd, swap or user-defined) and the configuration + of each disk type. + properties: + etcd: + description: DiskEtcd defines a disk type of etcd. + properties: + platformDiskID: + type: string + type: object + swap: + description: DiskSwap defines a disk type of swap. + properties: + platformDiskID: + type: string + type: object + type: + description: DiskType is the string representation of the three + types disk setups + enum: + - etcd + - swap + - user-defined + type: string + userDefined: + description: DiskUserDefined defines a disk type of user-defined. + properties: + mountPath: + type: string + platformDiskID: + type: string + type: object + type: object + type: array fencing: description: |- Fencing stores the information about a baremetal host's management controller. @@ -1458,6 +1497,45 @@ spec: - "" - amd64 type: string + diskSetup: + description: |- + DiskSetup stores the type of disks that will be setup with MachineConfigs. + The available types are etcd, swap and user-defined. + items: + description: |- + Disk defines the type of disk (etcd, swap or user-defined) and the configuration + of each disk type. + properties: + etcd: + description: DiskEtcd defines a disk type of etcd. + properties: + platformDiskID: + type: string + type: object + swap: + description: DiskSwap defines a disk type of swap. 
+ properties: + platformDiskID: + type: string + type: object + type: + description: DiskType is the string representation of the + three types disk setups + enum: + - etcd + - swap + - user-defined + type: string + userDefined: + description: DiskUserDefined defines a disk type of user-defined. + properties: + mountPath: + type: string + platformDiskID: + type: string + type: object + type: object + type: array fencing: description: |- Fencing stores the information about a baremetal host's management controller. @@ -2799,6 +2877,45 @@ spec: - "" - amd64 type: string + diskSetup: + description: |- + DiskSetup stores the type of disks that will be setup with MachineConfigs. + The available types are etcd, swap and user-defined. + items: + description: |- + Disk defines the type of disk (etcd, swap or user-defined) and the configuration + of each disk type. + properties: + etcd: + description: DiskEtcd defines a disk type of etcd. + properties: + platformDiskID: + type: string + type: object + swap: + description: DiskSwap defines a disk type of swap. + properties: + platformDiskID: + type: string + type: object + type: + description: DiskType is the string representation of the three + types disk setups + enum: + - etcd + - swap + - user-defined + type: string + userDefined: + description: DiskUserDefined defines a disk type of user-defined. + properties: + mountPath: + type: string + platformDiskID: + type: string + type: object + type: object + type: array fencing: description: |- Fencing stores the information about a baremetal host's management controller. 
diff --git a/go.mod b/go.mod index a3bd779f857..caaa3234cbc 100644 --- a/go.mod +++ b/go.mod @@ -85,7 +85,7 @@ require ( github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.5.4-0.20250116153252-296a5347104c github.com/nutanix-cloud-native/prism-go-client v0.5.0 github.com/onsi/gomega v1.36.2 - github.com/openshift/api v0.0.0-20250527072845-f5e205b58365 + github.com/openshift/api v0.0.0-20250704153732-ad766c4e6d8e github.com/openshift/assisted-image-service v0.0.0-20240607085136-02df2e56dde6 github.com/openshift/assisted-service/api v0.0.0 github.com/openshift/assisted-service/client v0.0.0 diff --git a/go.sum b/go.sum index 1c202e9d2c3..403b60cf336 100644 --- a/go.sum +++ b/go.sum @@ -722,8 +722,8 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift/api v0.0.0-20250527072845-f5e205b58365 h1:WfJTorFO5mJP6DLhK84K83TWuSqmeC3jCN436stKRZk= -github.com/openshift/api v0.0.0-20250527072845-f5e205b58365/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= +github.com/openshift/api v0.0.0-20250704153732-ad766c4e6d8e h1:9hzClGu+YAziWP6X93S/To86Q6P8aIfAeasl7zIPYA8= +github.com/openshift/api v0.0.0-20250704153732-ad766c4e6d8e/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= github.com/openshift/assisted-image-service v0.0.0-20240607085136-02df2e56dde6 h1:U6ve+dnHlHhAELoxX+rdFOHVhoaYl0l9qtxwYtsO6C0= github.com/openshift/assisted-image-service v0.0.0-20240607085136-02df2e56dde6/go.mod h1:o2H5VwQhUD8P6XsK6dRmKpCCJqVvv12KJQZBXmcCXCU= github.com/openshift/assisted-service v1.0.10-0.20230830164851-6573b5d7021d h1:CKw2Y4EdaFsMoqAdr2Tq0nlYTaaXmCRdP0gOu7pN64U= diff --git a/pkg/asset/machines/machineconfig/disks.go 
b/pkg/asset/machines/machineconfig/disks.go new file mode 100644 index 00000000000..b4561c1076f --- /dev/null +++ b/pkg/asset/machines/machineconfig/disks.go @@ -0,0 +1,173 @@ +package machineconfig + +import ( + "bytes" + "fmt" + "regexp" + "strings" + "text/template" + + igntypes "github.com/coreos/ignition/v2/config/v3_2/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/installer/pkg/asset/ignition" + "github.com/openshift/installer/pkg/types" +) + +// DiskMountUnit is used to supply the template the proper fields to produce the unit string. +type diskMount struct { + MountPath string + Label string +} + +const diskMountUnit = ` +[Unit] +Requires=systemd-fsck@dev-disk-by\x2dpartlabel-{{.Label}}.service +After=systemd-fsck@dev-disk-by\x2dpartlabel-{{.Label}}.service + +[Mount] +Where={{.MountPath}} +What=/dev/disk/by-partlabel/{{.Label}} +Type=xfs +Options=defaults,prjquota + +[Install] +RequiredBy=local-fs.target +` + +const swapMountUnit = ` +[Swap] +What=/dev/disk/by-partlabel/{{.Label}} + +[Install] +WantedBy=swap.target +` + +const gptSwap = "0657FD6D-A4AB-43C4-84E5-0933C84B4F4F" + +// ForDiskSetup generates a machine config for the three disk setup types, etcd, swap or user-defined. 
+func ForDiskSetup(role, device, label, path string, diskType types.DiskType) (*mcfgv1.MachineConfig, error) { + ignConfig := igntypes.Config{ + Ignition: igntypes.Ignition{ + Version: igntypes.MaxVersion.String(), + }, + } + + // Remove all non-alphanumeric characters from the label + reg := regexp.MustCompile(`[^a-zA-Z0-9]+`) + label = reg.ReplaceAllString(label, "") + + mountUnit := diskMount{ + MountPath: path, + Label: label, + } + + var templateStringToParse string + switch diskType { + case types.Etcd, types.UserDefined: + templateStringToParse = diskMountUnit + case types.Swap: + templateStringToParse = swapMountUnit + } + + diskMountUnitTemplate, err := template.New("mountUnit").Parse(templateStringToParse) + if err != nil { + return nil, err + } + + var dmu bytes.Buffer + err = diskMountUnitTemplate.Execute(&dmu, mountUnit) + if err != nil { + return nil, err + } + + units := dmu.String() + + var rawExt runtime.RawExtension + switch diskType { + case types.Etcd, types.UserDefined: + rawExt, err = getDiskIgnition(ignConfig, device, label, path, units) + if err != nil { + return nil, err + } + case types.Swap: + rawExt, err = getSwapIgnition(ignConfig, device, label, units) + if err != nil { + return nil, err + } + } + + return &mcfgv1.MachineConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: mcfgv1.SchemeGroupVersion.String(), + Kind: "MachineConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("01-disk-setup-%s-%s", label, role), + Labels: map[string]string{ + "machineconfiguration.openshift.io/role": role, + }, + }, + Spec: mcfgv1.MachineConfigSpec{ + Config: rawExt, + }, + }, nil +} +func getDiskIgnition(ignConfig igntypes.Config, device, label, path, units string) (runtime.RawExtension, error) { + unitName := strings.Trim(path, "/") + unitName = strings.ReplaceAll(unitName, "/", "-") + + ignConfig.Storage.Disks = append(ignConfig.Storage.Disks, igntypes.Disk{ + Device: device, + Partitions: []igntypes.Partition{{ + Label: ptr.To(label), + 
StartMiB: ptr.To(0), + SizeMiB: ptr.To(0), + }}, + WipeTable: ptr.To(true), + }) + + ignConfig.Storage.Filesystems = append(ignConfig.Storage.Filesystems, igntypes.Filesystem{ + Device: fmt.Sprintf("/dev/disk/by-partlabel/%s", label), + Format: ptr.To("xfs"), + Label: ptr.To(label), + MountOptions: []igntypes.MountOption{"defaults", "prjquota"}, + Path: ptr.To(path), + WipeFilesystem: ptr.To(true), + }) + ignConfig.Systemd.Units = append(ignConfig.Systemd.Units, igntypes.Unit{ + Name: fmt.Sprintf("%s.mount", unitName), + Enabled: ptr.To(true), + Contents: &units, + }) + return ignition.ConvertToRawExtension(ignConfig) +} + +func getSwapIgnition(ignConfig igntypes.Config, device, label, units string) (runtime.RawExtension, error) { + unitName := "dev-disk-by\\x2dpartlabel-swap.swap" + ignConfig.Storage.Disks = append(ignConfig.Storage.Disks, igntypes.Disk{ + Device: device, + Partitions: []igntypes.Partition{{ + Label: ptr.To(label), + StartMiB: ptr.To(0), + SizeMiB: ptr.To(0), + GUID: ptr.To(gptSwap), + }}, + WipeTable: ptr.To(true), + }) + + ignConfig.Storage.Filesystems = append(ignConfig.Storage.Filesystems, igntypes.Filesystem{ + Device: fmt.Sprintf("/dev/disk/by-partlabel/%s", label), + Format: ptr.To("swap"), + Label: ptr.To(label), + }) + ignConfig.Systemd.Units = append(ignConfig.Systemd.Units, igntypes.Unit{ + Name: unitName, + Enabled: ptr.To(true), + Contents: &units, + }) + return ignition.ConvertToRawExtension(ignConfig) +} diff --git a/pkg/asset/machines/machineconfig/disks_test.go b/pkg/asset/machines/machineconfig/disks_test.go new file mode 100644 index 00000000000..c350166bf7c --- /dev/null +++ b/pkg/asset/machines/machineconfig/disks_test.go @@ -0,0 +1,432 @@ +package machineconfig + +import ( + "encoding/json" + "strings" + "testing" + + igntypes "github.com/coreos/ignition/v2/config/v3_2/types" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + mcfgv1 
"github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/installer/pkg/types" +) + +// Generated by Cursor + +func TestForDiskSetup(t *testing.T) { + cases := []struct { + name string + role string + device string + label string + path string + diskType types.DiskType + expectedError string + expectedConfig *mcfgv1.MachineConfig + }{ + { + name: "etcd disk setup", + role: "master", + device: "/dev/sdb", + label: "etcd", + path: "/var/lib/etcd", + diskType: types.Etcd, + expectedConfig: &mcfgv1.MachineConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: mcfgv1.SchemeGroupVersion.String(), + Kind: "MachineConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "01-disk-setup-etcd-master", + Labels: map[string]string{ + "machineconfiguration.openshift.io/role": "master", + }, + }, + Spec: mcfgv1.MachineConfigSpec{ + Config: runtime.RawExtension{}, + }, + }, + }, + { + name: "user-defined disk setup", + role: "worker", + device: "/dev/sdc", + label: "userdata", + path: "/var/lib/userdata", + diskType: types.UserDefined, + expectedConfig: &mcfgv1.MachineConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: mcfgv1.SchemeGroupVersion.String(), + Kind: "MachineConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "01-disk-setup-userdata-worker", + Labels: map[string]string{ + "machineconfiguration.openshift.io/role": "worker", + }, + }, + Spec: mcfgv1.MachineConfigSpec{ + Config: runtime.RawExtension{}, + }, + }, + }, + { + name: "swap disk setup", + role: "master", + device: "/dev/sdd", + label: "swap", + path: "", + diskType: types.Swap, + expectedConfig: &mcfgv1.MachineConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: mcfgv1.SchemeGroupVersion.String(), + Kind: "MachineConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "01-disk-setup-swap-master", + Labels: map[string]string{ + "machineconfiguration.openshift.io/role": "master", + }, + }, + Spec: mcfgv1.MachineConfigSpec{ + Config: runtime.RawExtension{}, + }, + }, + }, + { + name: "label with special 
characters", + role: "worker", + device: "/dev/sde", + label: "etcd-data[123]", + path: "/var/lib/etcd", + diskType: types.Etcd, + expectedConfig: &mcfgv1.MachineConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: mcfgv1.SchemeGroupVersion.String(), + Kind: "MachineConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "01-disk-setup-etcddata123-worker", + Labels: map[string]string{ + "machineconfiguration.openshift.io/role": "worker", + }, + }, + Spec: mcfgv1.MachineConfigSpec{ + Config: runtime.RawExtension{}, + }, + }, + }, + { + name: "complex path with slashes", + role: "master", + device: "/dev/sdf", + label: "data", + path: "/var/lib/openshift/data", + diskType: types.UserDefined, + expectedConfig: &mcfgv1.MachineConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: mcfgv1.SchemeGroupVersion.String(), + Kind: "MachineConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "01-disk-setup-data-master", + Labels: map[string]string{ + "machineconfiguration.openshift.io/role": "master", + }, + }, + Spec: mcfgv1.MachineConfigSpec{ + Config: runtime.RawExtension{}, + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + machineConfig, err := ForDiskSetup(tc.role, tc.device, tc.label, tc.path, tc.diskType) + + if tc.expectedError != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tc.expectedError) + return + } + + assert.NoError(t, err) + assert.NotNil(t, machineConfig) + + // Verify basic structure + assert.Equal(t, tc.expectedConfig.TypeMeta, machineConfig.TypeMeta) + assert.Equal(t, tc.expectedConfig.ObjectMeta.Name, machineConfig.ObjectMeta.Name) + assert.Equal(t, tc.expectedConfig.ObjectMeta.Labels, machineConfig.ObjectMeta.Labels) + + // Verify the config is not empty + assert.NotNil(t, machineConfig.Spec.Config) + assert.NotEmpty(t, machineConfig.Spec.Config.Raw) + + // For label checks, use the sanitized label as produced by the code + sanitizedLabel := "" + switch tc.name { + case "label with special characters": + 
sanitizedLabel = "etcddata123" + case "user-defined disk setup": + sanitizedLabel = "userdata" + case "etcd disk setup": + sanitizedLabel = "etcd" + case "complex path with slashes": + sanitizedLabel = "data" + case "swap disk setup": + sanitizedLabel = "swap" + } + + // Verify specific disk type behavior + switch tc.diskType { + case types.Etcd, types.UserDefined: + assertEtcdUserDefinedConfig(t, machineConfig, tc.device, sanitizedLabel, tc.path) + case types.Swap: + assertSwapConfig(t, machineConfig, tc.device, sanitizedLabel) + } + }) + } +} + +func assertEtcdUserDefinedConfig(t *testing.T, mc *mcfgv1.MachineConfig, device, label, path string) { + t.Helper() + + // Parse the ignition config to verify disk and filesystem configuration + ignConfig, err := parseIgnitionConfig(mc.Spec.Config.Raw) + assert.NoError(t, err) + + // Verify disk configuration + assert.Len(t, ignConfig.Storage.Disks, 1) + disk := ignConfig.Storage.Disks[0] + assert.Equal(t, device, disk.Device) + assert.Len(t, disk.Partitions, 1) + assert.Equal(t, label, *disk.Partitions[0].Label) + assert.Equal(t, int64(0), int64(*disk.Partitions[0].StartMiB)) + assert.Equal(t, int64(0), int64(*disk.Partitions[0].SizeMiB)) + assert.True(t, *disk.WipeTable) + + // Verify filesystem configuration + assert.Len(t, ignConfig.Storage.Filesystems, 1) + fs := ignConfig.Storage.Filesystems[0] + assert.Equal(t, "/dev/disk/by-partlabel/"+label, fs.Device) + assert.Equal(t, "xfs", *fs.Format) + assert.Equal(t, label, *fs.Label) + assert.Equal(t, path, *fs.Path) + assert.True(t, *fs.WipeFilesystem) + // MountOptions is []igntypes.MountOption (alias for string), so compare as strings + mountOpts := make([]string, 0, len(fs.MountOptions)) + for _, opt := range fs.MountOptions { + mountOpts = append(mountOpts, string(opt)) + } + assert.Contains(t, mountOpts, "defaults") + assert.Contains(t, mountOpts, "prjquota") + + // Verify systemd unit + assert.Len(t, ignConfig.Systemd.Units, 1) + unit := ignConfig.Systemd.Units[0] + 
expectedUnitName := path + expectedUnitName = strings.Trim(expectedUnitName, "/") + expectedUnitName = strings.ReplaceAll(expectedUnitName, "/", "-") + expectedUnitName += ".mount" + assert.Equal(t, expectedUnitName, unit.Name) + assert.True(t, *unit.Enabled) + assert.NotNil(t, unit.Contents) +} + +func assertSwapConfig(t *testing.T, mc *mcfgv1.MachineConfig, device, label string) { + t.Helper() + + // Parse the ignition config to verify disk and filesystem configuration + ignConfig, err := parseIgnitionConfig(mc.Spec.Config.Raw) + assert.NoError(t, err) + + // Verify disk configuration + assert.Len(t, ignConfig.Storage.Disks, 1) + disk := ignConfig.Storage.Disks[0] + assert.Equal(t, device, disk.Device) + assert.Len(t, disk.Partitions, 1) + assert.Equal(t, label, *disk.Partitions[0].Label) + assert.Equal(t, int64(0), int64(*disk.Partitions[0].StartMiB)) + assert.Equal(t, int64(0), int64(*disk.Partitions[0].SizeMiB)) + assert.Equal(t, "0657FD6D-A4AB-43C4-84E5-0933C84B4F4F", *disk.Partitions[0].GUID) + assert.True(t, *disk.WipeTable) + + // Verify filesystem configuration + assert.Len(t, ignConfig.Storage.Filesystems, 1) + fs := ignConfig.Storage.Filesystems[0] + assert.Equal(t, "/dev/disk/by-partlabel/"+label, fs.Device) + assert.Equal(t, "swap", *fs.Format) + assert.Equal(t, label, *fs.Label) + + // Verify systemd unit + assert.Len(t, ignConfig.Systemd.Units, 1) + unit := ignConfig.Systemd.Units[0] + assert.Equal(t, "dev-disk-by\\x2dpartlabel-swap.swap", unit.Name) + assert.True(t, *unit.Enabled) + assert.NotNil(t, unit.Contents) +} + +func TestDiskMountUnit(t *testing.T) { + cases := []struct { + name string + mountUnit diskMount + expected string + }{ + { + name: "basic mount unit", + mountUnit: diskMount{ + MountPath: "/var/lib/etcd", + Label: "etcd", + }, + expected: `[Unit] +Requires=systemd-fsck@dev-disk-by\x2dpartlabel-etcd.service +After=systemd-fsck@dev-disk-by\x2dpartlabel-etcd.service + +[Mount] +Where=/var/lib/etcd +What=/dev/disk/by-partlabel/etcd 
+Type=xfs +Options=defaults,prjquota + +[Install] +RequiredBy=local-fs.target +`, + }, + { + name: "complex path mount unit", + mountUnit: diskMount{ + MountPath: "/var/lib/openshift/data", + Label: "userdata", + }, + expected: `[Unit] +Requires=systemd-fsck@dev-disk-by\x2dpartlabel-userdata.service +After=systemd-fsck@dev-disk-by\x2dpartlabel-userdata.service + +[Mount] +Where=/var/lib/openshift/data +What=/dev/disk/by-partlabel/userdata +Type=xfs +Options=defaults,prjquota + +[Install] +RequiredBy=local-fs.target +`, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // Test the template execution by creating a machine config + // and verifying the generated unit content + machineConfig, err := ForDiskSetup("test", "/dev/sdb", tc.mountUnit.Label, tc.mountUnit.MountPath, types.Etcd) + assert.NoError(t, err) + + ignConfig, err := parseIgnitionConfig(machineConfig.Spec.Config.Raw) + assert.NoError(t, err) + + assert.Len(t, ignConfig.Systemd.Units, 1) + unit := ignConfig.Systemd.Units[0] + assert.NotNil(t, unit.Contents) + assert.Contains(t, *unit.Contents, tc.mountUnit.MountPath) + assert.Contains(t, *unit.Contents, tc.mountUnit.Label) + }) + } +} + +func TestSwapMountUnit(t *testing.T) { + cases := []struct { + name string + label string + expected string + }{ + { + name: "swap mount unit", + label: "swap", + expected: `[Swap] +What=/dev/disk/by-partlabel/swap + +[Install] +WantedBy=swap.target +`, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // Test the template execution by creating a machine config + // and verifying the generated unit content + machineConfig, err := ForDiskSetup("test", "/dev/sdb", tc.label, "", types.Swap) + assert.NoError(t, err) + + ignConfig, err := parseIgnitionConfig(machineConfig.Spec.Config.Raw) + assert.NoError(t, err) + + assert.Len(t, ignConfig.Systemd.Units, 1) + unit := ignConfig.Systemd.Units[0] + assert.NotNil(t, unit.Contents) + assert.Contains(t, *unit.Contents, 
tc.label) + }) + } +} + +func TestLabelSanitization(t *testing.T) { + cases := []struct { + name string + inputLabel string + expectedLabel string + }{ + { + name: "simple label", + inputLabel: "etcd", + expectedLabel: "etcd", + }, + { + name: "label with brackets", + inputLabel: "etcd[123]", + expectedLabel: "etcd123", + }, + { + name: "label with alphanumeric brackets", + inputLabel: "data[abc123]", + expectedLabel: "dataabc123", + }, + { + name: "label with multiple brackets", + inputLabel: "disk[1][2][3]", + expectedLabel: "disk123", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + machineConfig, err := ForDiskSetup("test", "/dev/sdb", tc.inputLabel, "/var/lib/data", types.Etcd) + assert.NoError(t, err) + + ignConfig, err := parseIgnitionConfig(machineConfig.Spec.Config.Raw) + assert.NoError(t, err) + + // Verify the sanitized label is used in the disk partition + assert.Len(t, ignConfig.Storage.Disks, 1) + disk := ignConfig.Storage.Disks[0] + assert.Len(t, disk.Partitions, 1) + assert.Equal(t, tc.expectedLabel, *disk.Partitions[0].Label) + + // Verify the sanitized label is used in the filesystem + assert.Len(t, ignConfig.Storage.Filesystems, 1) + fs := ignConfig.Storage.Filesystems[0] + assert.Equal(t, "/dev/disk/by-partlabel/"+tc.expectedLabel, fs.Device) + assert.Equal(t, tc.expectedLabel, *fs.Label) + }) + } +} + +// Helper function to parse ignition config from raw bytes. 
+func parseIgnitionConfig(raw []byte) (*igntypes.Config, error) { + ignConfig := &igntypes.Config{} + err := json.Unmarshal(raw, ignConfig) + return ignConfig, err +} diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go index 432befbbf6b..cfa149005f2 100644 --- a/pkg/asset/machines/master.go +++ b/pkg/asset/machines/master.go @@ -20,6 +20,7 @@ import ( "sigs.k8s.io/yaml" configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/api/features" machinev1 "github.com/openshift/api/machine/v1" machinev1alpha1 "github.com/openshift/api/machine/v1alpha1" machinev1beta1 "github.com/openshift/api/machine/v1beta1" @@ -615,6 +616,30 @@ func (m *Master) Generate(ctx context.Context, dependencies asset.Parents) error machineConfigs = append(machineConfigs, ignIPv6) } + if installConfig.Config.EnabledFeatureGates().Enabled(features.FeatureGateMultiDiskSetup) { + for i, diskSetup := range installConfig.Config.ControlPlane.DiskSetup { + var dataDisk any + switch ic.Platform.Name() { + case azuretypes.Name: + azureControlPlaneMachinePool := ic.ControlPlane.Platform.Azure + + if i < len(azureControlPlaneMachinePool.DataDisks) { + dataDisk = azureControlPlaneMachinePool.DataDisks[i] + } + default: + return errors.Errorf("disk setup for %s is not supported", ic.Platform.Name()) + } + + if dataDisk != nil { + diskSetupIgn, err := NodeDiskSetup(installConfig, "master", diskSetup, dataDisk) + if err != nil { + return errors.Wrap(err, "failed to create ignition to setup disks for control plane") + } + machineConfigs = append(machineConfigs, diskSetupIgn) + } + } + } + m.MachineConfigFiles, err = machineconfig.Manifests(machineConfigs, "master", directory) if err != nil { return errors.Wrap(err, "failed to create MachineConfig manifests for master machines") diff --git a/pkg/asset/machines/util.go b/pkg/asset/machines/util.go new file mode 100644 index 00000000000..d5077640025 --- /dev/null +++ b/pkg/asset/machines/util.go @@ -0,0 +1,52 @@ +package machines 
+ +import ( + "fmt" + + "github.com/pkg/errors" + "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + + "github.com/openshift/api/features" + v1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/installer/pkg/asset/installconfig" + "github.com/openshift/installer/pkg/asset/machines/machineconfig" + "github.com/openshift/installer/pkg/types" + azuretypes "github.com/openshift/installer/pkg/types/azure" +) + +// NodeDiskSetup determines the path per disk type, and per platform and role, runs ForDiskSetup. +func NodeDiskSetup(installConfig *installconfig.InstallConfig, role string, diskSetup types.Disk, dataDisk any) (*v1.MachineConfig, error) { + var path string + + ic := installConfig.Config + + label := string(diskSetup.Type) + + switch diskSetup.Type { + case types.Etcd: + path = "/var/lib/etcd" + case types.Swap: + path = "" + case types.UserDefined: + path = diskSetup.UserDefined.MountPath + label = diskSetup.UserDefined.PlatformDiskID + } + + switch ic.Platform.Name() { + case azuretypes.Name: + if installConfig.Config.EnabledFeatureGates().Enabled(features.FeatureGateAzureMultiDisk) { + if azureDataDisk, ok := dataDisk.(v1beta1.DataDisk); ok { + device := fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", *azureDataDisk.Lun) + diskSetupIgn, err := machineconfig.ForDiskSetup(role, device, label, path, diskSetup.Type) + if err != nil { + return nil, errors.Wrap(err, "failed to create ignition to setup disks for master machines") + } + return diskSetupIgn, nil + } + return nil, errors.Errorf("unsupported azure data disk type") + } + default: + return nil, errors.Errorf("unsupported platform %q", ic.Platform.Name()) + } + return nil, nil +} diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index 09479dd188e..19304870149 100644 --- a/pkg/asset/machines/worker.go +++ b/pkg/asset/machines/worker.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/yaml" configv1 "github.com/openshift/api/config/v1" + 
"github.com/openshift/api/features" machinev1 "github.com/openshift/api/machine/v1" machinev1alpha1 "github.com/openshift/api/machine/v1alpha1" machinev1beta1 "github.com/openshift/api/machine/v1beta1" @@ -373,6 +374,28 @@ func (w *Worker) Generate(ctx context.Context, dependencies asset.Parents) error machineConfigs = append(machineConfigs, ignRoutes) } } + if installConfig.Config.EnabledFeatureGates().Enabled(features.FeatureGateMultiDiskSetup) { + for i, diskSetup := range pool.DiskSetup { + var dataDisk any + switch ic.Platform.Name() { + // Each platform has their unique dataDisk type + case azuretypes.Name: + if i < len(pool.Platform.Azure.DataDisks) { + dataDisk = pool.Platform.Azure.DataDisks[i] + } + default: + return errors.Errorf("disk setup for %s is not supported", ic.Platform.Name()) + } + + if dataDisk != nil { + diskSetupIgn, err := NodeDiskSetup(installConfig, "worker", diskSetup, dataDisk) + if err != nil { + return errors.Wrap(err, "failed to create ignition to setup disks for compute") + } + machineConfigs = append(machineConfigs, diskSetupIgn) + } + } + } // The maximum number of networks supported on ServiceNetwork is two, one IPv4 and one IPv6 network. // The cluster-network-operator handles the validation of this field. // Reference: https://github.com/openshift/cluster-network-operator/blob/fc3e0e25b4cfa43e14122bdcdd6d7f2585017d75/pkg/network/cluster_config.go#L45-L52 diff --git a/pkg/types/azure/validation/machinepool.go b/pkg/types/azure/validation/machinepool.go index 04db1eaeff9..a02959dc019 100644 --- a/pkg/types/azure/validation/machinepool.go +++ b/pkg/types/azure/validation/machinepool.go @@ -3,6 +3,7 @@ package validation import ( "fmt" "sort" + "strings" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/sets" @@ -35,6 +36,7 @@ var ( ) // ValidateMachinePool checks that the specified machine pool is valid. 
+// nolint:gocyclo func ValidateMachinePool(p *azure.MachinePool, poolName string, platform *azure.Platform, pool *types.MachinePool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -109,8 +111,20 @@ func ValidateMachinePool(p *azure.MachinePool, poolName string, platform *azure. } } + // dataDisks in defaultMachinePool is unsupported + if poolName == "" { + if len(p.DataDisks) > 0 { + var dataDiskNames []string + for _, d := range p.DataDisks { + dataDiskNames = append(dataDiskNames, d.NameSuffix) + } + + allErrs = append(allErrs, field.Invalid(fldPath.Child("dataDisks"), strings.Join(dataDiskNames, ","), "not allowed on default machine pool, use dataDisks compute and controlPlane only")) + } + } + if pool != nil { - if len(p.DataDisks) != 0 { + if len(p.DataDisks) != 0 && len(pool.DiskSetup) != 0 { allErrs = append(allErrs, validateDataDiskSetup(p, pool, fldPath.Child("dataDisks"))...) } } @@ -124,16 +138,51 @@ func ValidateMachinePool(p *azure.MachinePool, poolName string, platform *azure. 
func validateDataDiskSetup(azurePool *azure.MachinePool, pool *types.MachinePool, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - for _, d := range azurePool.DataDisks { + // We could have a situation where the azure DataDisks are + // defined but no corresponding disk setup but we should never have + // more DiskSetup than DataDisks + if len(azurePool.DataDisks) < len(pool.DiskSetup) { + allErrs = append(allErrs, field.TooLong(fldPath, pool.DiskSetup, len(azurePool.DataDisks))) + // return early if disksetup and datadisks don't match lengths + return allErrs + } + + lunNumbers := make(map[int32]interface{}) + for i, d := range azurePool.DataDisks { if d.Lun == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("Lun"), fmt.Sprintf("%s must have lun id", d.NameSuffix))) - } else if *(d.Lun) < 0 || *(d.Lun) > 63 { - allErrs = append(allErrs, field.Required(fldPath.Child("Lun"), fmt.Sprintf("%s must have lun id between 0 and 63", d.NameSuffix))) + allErrs = append(allErrs, field.Required(fldPath.Child("Lun"), fmt.Sprintf("%q must have lun id", d.NameSuffix))) + } else { + if *(d.Lun) < 0 || *(d.Lun) > 63 { + allErrs = append(allErrs, field.Required(fldPath.Child("Lun"), fmt.Sprintf("%q must have lun id between 0 and 63", d.NameSuffix))) + } + if _, ok := lunNumbers[*d.Lun]; ok { + allErrs = append(allErrs, field.Invalid(fldPath.Child("Lun"), d.NameSuffix, "dataDisk must have a unique lun number")) + } else { + lunNumbers[*d.Lun] = struct{}{} + } } - if d.DiskSizeGB == 0 { + if d.DiskSizeGB <= 0 { allErrs = append(allErrs, field.Invalid(fldPath.Child("DiskSizeGB"), d.DiskSizeGB, "diskSizeGB must be greater than zero")) } + + if i < len(pool.DiskSetup) { + setup := pool.DiskSetup[i] + switch setup.Type { + case types.Etcd: + if setup.Etcd != nil && setup.Etcd.PlatformDiskID != d.NameSuffix { + allErrs = append(allErrs, field.Invalid(fldPath.Child("NameSuffix"), d.NameSuffix, fmt.Sprintf("does not match etcd PlatformDiskID %q", 
setup.Etcd.PlatformDiskID))) + } + case types.Swap: + if setup.Swap != nil && setup.Swap.PlatformDiskID != d.NameSuffix { + allErrs = append(allErrs, field.Invalid(fldPath.Child("NameSuffix"), d.NameSuffix, fmt.Sprintf("does not match swap PlatformDiskID %q", setup.Swap.PlatformDiskID))) + } + case types.UserDefined: + if setup.UserDefined != nil && setup.UserDefined.PlatformDiskID != d.NameSuffix { + allErrs = append(allErrs, field.Invalid(fldPath.Child("NameSuffix"), d.NameSuffix, fmt.Sprintf("does not match user defined PlatformDiskID %q", setup.UserDefined.PlatformDiskID))) + } + } + } } return allErrs diff --git a/pkg/types/azure/validation/machinepool_test.go b/pkg/types/azure/validation/machinepool_test.go index 11ae6c8e11f..e75802fe787 100644 --- a/pkg/types/azure/validation/machinepool_test.go +++ b/pkg/types/azure/validation/machinepool_test.go @@ -93,6 +93,12 @@ func TestValidateMachinePool(t *testing.T) { azurePlatform: azure.PublicCloud, pool: &types.MachinePool{ Name: "master", + DiskSetup: []types.Disk{{ + Type: "etcd", + Etcd: &types.DiskEtcd{ + PlatformDiskID: "etcd", + }, + }}, Platform: types.MachinePoolPlatform{ Azure: &azure.MachinePool{ DataDisks: []capz.DataDisk{{ @@ -105,13 +111,20 @@ func TestValidateMachinePool(t *testing.T) { }, }, }, - expected: `^test-path\.dataDisks\.Lun: Required value: etcd must have lun id$`, + expected: `^test-path\.dataDisks\.Lun: Required value: \"etcd\" must have lun id$`, }, { name: "lun id must be below 64", azurePlatform: azure.PublicCloud, + pool: &types.MachinePool{ Name: "master", + DiskSetup: []types.Disk{{ + Type: "etcd", + Etcd: &types.DiskEtcd{ + PlatformDiskID: "etcd", + }, + }}, Platform: types.MachinePoolPlatform{ Azure: &azure.MachinePool{ DataDisks: []capz.DataDisk{{ @@ -124,13 +137,44 @@ func TestValidateMachinePool(t *testing.T) { }, }, }, - expected: `^test-path\.dataDisks\.Lun: Required value: etcd must have lun id between 0 and 63$`, + expected: `^test-path\.dataDisks\.Lun: Required value: 
\"etcd\" must have lun id between 0 and 63$`, + }, + { + name: "multiple disk and setup PlatformDiskID does not match", + azurePlatform: azure.PublicCloud, + pool: &types.MachinePool{ + Name: "master", + DiskSetup: []types.Disk{{ + Type: "etcd", + Etcd: &types.DiskEtcd{ + PlatformDiskID: "etcd", + }, + }}, + Platform: types.MachinePoolPlatform{ + Azure: &azure.MachinePool{ + DataDisks: []capz.DataDisk{{ + NameSuffix: "foo", + DiskSizeGB: 1, + ManagedDisk: nil, + Lun: pointer.Int32(0), + }, + }, + }, + }, + }, + expected: `^test-path\.dataDisks\.NameSuffix: Invalid value: \"foo\": does not match etcd PlatformDiskID \"etcd\"$`, }, { name: "lun id must be above 0", azurePlatform: azure.PublicCloud, pool: &types.MachinePool{ Name: "master", + DiskSetup: []types.Disk{{ + Type: "etcd", + Etcd: &types.DiskEtcd{ + PlatformDiskID: "etcd", + }, + }}, Platform: types.MachinePoolPlatform{ Azure: &azure.MachinePool{ DataDisks: []capz.DataDisk{{ @@ -143,13 +187,19 @@ func TestValidateMachinePool(t *testing.T) { }, }, }, - expected: `^test-path\.dataDisks\.Lun: Required value: etcd must have lun id between 0 and 63$`, + expected: `^test-path\.dataDisks\.Lun: Required value: \"etcd\" must have lun id between 0 and 63$`, }, { name: "multiple disk size must be greater than zero", azurePlatform: azure.PublicCloud, pool: &types.MachinePool{ Name: "master", + DiskSetup: []types.Disk{{ + Type: "etcd", + Etcd: &types.DiskEtcd{ + PlatformDiskID: "etcd", + }, + }}, Platform: types.MachinePoolPlatform{ Azure: &azure.MachinePool{ DataDisks: []capz.DataDisk{{ @@ -164,6 +214,69 @@ func TestValidateMachinePool(t *testing.T) { }, expected: `^test-path\.dataDisks\.DiskSizeGB: Invalid value: 0: diskSizeGB must be greater than zero$`, }, + { + name: "datadisks in default machine pool is invalid", + azurePlatform: azure.PublicCloud, + pool: &types.MachinePool{ + Name: "", + DiskSetup: []types.Disk{}, + Platform: types.MachinePoolPlatform{ + Azure: &azure.MachinePool{ + DataDisks: []capz.DataDisk{{ + 
NameSuffix: "etcd", + DiskSizeGB: 0, + ManagedDisk: nil, + Lun: pointer.Int32(0), + CachingType: "", + }}, + }, + }, + }, + expected: `^test-path\.dataDisks: Invalid value: \"etcd\": not allowed on default machine pool, use dataDisks compute and controlPlane only$`, + }, + { + name: "lun id must be unique", + azurePlatform: azure.PublicCloud, + pool: &types.MachinePool{ + Name: "master", + DiskSetup: []types.Disk{ + { + Type: "etcd", + Etcd: &types.DiskEtcd{ + PlatformDiskID: "etcd", + }, + }, + { + Type: "user-defined", + UserDefined: &types.DiskUserDefined{ + PlatformDiskID: "containers", + MountPath: "/var/lib/containers", + }, + }, + }, + Platform: types.MachinePoolPlatform{ + Azure: &azure.MachinePool{ + DataDisks: []capz.DataDisk{ + { + NameSuffix: "etcd", + DiskSizeGB: 1, + ManagedDisk: nil, + Lun: pointer.Int32(0), + CachingType: "", + }, + { + NameSuffix: "containers", + DiskSizeGB: 1, + ManagedDisk: nil, + Lun: pointer.Int32(0), + CachingType: "", + }, + }, + }, + }, + }, + expected: `^test-path\.dataDisks\.Lun: Invalid value: \"containers\": dataDisk must have a unique lun number$`, + }, { name: "unsupported disk master", azurePlatform: azure.PublicCloud, diff --git a/pkg/types/defaults/validation/featuregates.go b/pkg/types/defaults/validation/featuregates.go index 8476854fcb7..b53eb1b001f 100644 --- a/pkg/types/defaults/validation/featuregates.go +++ b/pkg/types/defaults/validation/featuregates.go @@ -17,5 +17,50 @@ func GatedFeatures(c *types.InstallConfig) []featuregates.GatedInstallConfigFeat Condition: c.ControlPlane != nil && c.ControlPlane.Fencing != nil, Field: field.NewPath("platform", "none", "fencingCredentials"), }, + { + FeatureGateName: features.FeatureGateMultiDiskSetup, + Condition: c.ControlPlane != nil && len(c.ControlPlane.DiskSetup) != 0, + Field: field.NewPath("controlPlane", "diskSetup"), + }, + { + FeatureGateName: features.FeatureGateMultiDiskSetup, + Condition: func() bool { + computeMachinePool := c.Compute + for _, compute := 
range computeMachinePool { + if len(compute.DiskSetup) != 0 { + return true + } + } + return false + }(), + Field: field.NewPath("compute", "diskSetup"), + }, + { + FeatureGateName: features.FeatureGateNodeSwap, + Condition: func() bool { + computeMachinePool := c.Compute + for _, compute := range computeMachinePool { + for _, ds := range compute.DiskSetup { + if ds.Type == types.Swap { + return true + } + } + } + return false + }(), + Field: field.NewPath("compute", "diskSetup"), + }, + { + FeatureGateName: features.FeatureGateNodeSwap, + Condition: c.ControlPlane != nil && func() bool { + for _, ds := range c.ControlPlane.DiskSetup { + if ds.Type == types.Swap { + return true + } + } + return false + }(), + Field: field.NewPath("controlPlane", "diskSetup"), + }, } } diff --git a/pkg/types/machinepools.go b/pkg/types/machinepools.go index 806ca11a43e..307b428e0dd 100644 --- a/pkg/types/machinepools.go +++ b/pkg/types/machinepools.go @@ -50,6 +50,45 @@ const ( ArchitectureARM64 = "arm64" ) +// DiskType is the string representation of the three types disk setups +// +kubebuilder:validation:Enum=etcd;swap;user-defined +type DiskType string + +const ( + // Etcd indicates etcd disk setup. + Etcd DiskType = "etcd" + // Swap indicates swap disk setup. + Swap DiskType = "swap" + // UserDefined indicates user-defined disk setup. + UserDefined DiskType = "user-defined" +) + +// Disk defines the type of disk (etcd, swap or user-defined) and the configuration +// of each disk type. +type Disk struct { + Type DiskType `json:"type,omitempty"` + + UserDefined *DiskUserDefined `json:"userDefined,omitempty"` + Etcd *DiskEtcd `json:"etcd,omitempty"` + Swap *DiskSwap `json:"swap,omitempty"` +} + +// DiskUserDefined defines a disk type of user-defined. +type DiskUserDefined struct { + PlatformDiskID string `json:"platformDiskID,omitempty"` + MountPath string `json:"mountPath,omitempty"` +} + +// DiskSwap defines a disk type of swap. 
+type DiskSwap struct { + PlatformDiskID string `json:"platformDiskID,omitempty"` +} + +// DiskEtcd defines a disk type of etcd. +type DiskEtcd struct { + PlatformDiskID string `json:"platformDiskID,omitempty"` +} + // MachinePool is a pool of machines to be installed. type MachinePool struct { // Name is the name of the machine pool. @@ -83,6 +122,11 @@ type MachinePool struct { // Fencing may only be set for control plane nodes. // +optional Fencing *Fencing `json:"fencing,omitempty"` + + // DiskSetup stores the type of disks that will be setup with MachineConfigs. + // The available types are etcd, swap and user-defined. + // +optional + DiskSetup []Disk `json:"diskSetup,omitempty"` } // MachinePoolPlatform is the platform-specific configuration for a machine diff --git a/pkg/types/validation/machinepools.go b/pkg/types/validation/machinepools.go index 8d8c42fd157..b2ce4aada77 100644 --- a/pkg/types/validation/machinepools.go +++ b/pkg/types/validation/machinepools.go @@ -4,6 +4,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/yaml" "github.com/openshift/installer/pkg/types" "github.com/openshift/installer/pkg/types/aws" @@ -75,10 +76,67 @@ func ValidateMachinePool(platform *types.Platform, p *types.MachinePool, fldPath if platform.AWS != nil { allErrs = append(allErrs, awsvalidation.ValidateMachinePoolArchitecture(p, fldPath.Child("architecture"))...) } + + allErrs = append(allErrs, validateDiskSetup(p, fldPath.Child("diskSetup"))...) + allErrs = append(allErrs, validateMachinePoolPlatform(platform, &p.Platform, p, fldPath.Child("platform"))...) 
return allErrs } +func validateDiskSetup(p *types.MachinePool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + foundEtcd := false + foundSwap := false + for _, ds := range p.DiskSetup { + // outputting the yaml to make recognizing the issue easier for the user + dsBytes, err := yaml.Marshal(ds) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + } + dsYaml := string(dsBytes) + switch ds.Type { + case types.UserDefined: + if ds.UserDefined == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("userDefined"), dsYaml, "userDefined configuration must be created")) + continue + } + if len(ds.UserDefined.PlatformDiskID) > 12 { + userDefinedPath := fldPath.Child("userDefined") + allErrs = append(allErrs, field.Invalid(userDefinedPath.Child("platformDiskId"), dsYaml, "cannot be longer than 12 characters")) + continue + } + case types.Etcd: + if foundEtcd { + allErrs = append(allErrs, field.TooMany(fldPath.Child("etcd"), 2, 1)) + continue + } + if ds.Etcd == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("etcd"), dsYaml, "etcd configuration must be created")) + continue + } + // etcd should only be setup on control plane, not any other machine type. 
+ if p.Name != "master" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("etcd"), dsYaml, "cannot specify etcd on worker machine pools")) + continue + } + foundEtcd = true + case types.Swap: + if foundSwap { + allErrs = append(allErrs, field.TooMany(fldPath.Child("swap"), 2, 1)) + continue + } + if ds.Swap == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("swap"), dsYaml, "swap configuration must be created")) + continue + } + foundSwap = true + } + } + + return allErrs +} + func validateMachinePoolPlatform(platform *types.Platform, p *types.MachinePoolPlatform, pool *types.MachinePool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} platformName := platform.Name() diff --git a/pkg/types/validation/machinepools_test.go b/pkg/types/validation/machinepools_test.go index ed8446834c7..887b1126d73 100644 --- a/pkg/types/validation/machinepools_test.go +++ b/pkg/types/validation/machinepools_test.go @@ -24,12 +24,15 @@ func validMachinePool(name string) *types.MachinePool { } } +// Cursor generated disk Setup tests + func TestValidateMachinePool(t *testing.T) { cases := []struct { - name string - platform *types.Platform - pool *types.MachinePool - valid bool + name string + platform *types.Platform + pool *types.MachinePool + valid bool + expectedError string }{ { name: "minimal", @@ -248,13 +251,251 @@ func TestValidateMachinePool(t *testing.T) { }(), valid: true, }, + { + name: "valid multiple disks", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("master") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "etcd", + UserDefined: nil, + Etcd: &types.DiskEtcd{PlatformDiskID: "etcd"}, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + valid: true, + }, + { + name: "invalid etcd disk type on worker machine pool", + platform: &types.Platform{Azure: &azure.Platform{Region: 
"eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("worker") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "etcd", + UserDefined: nil, + Etcd: &types.DiskEtcd{PlatformDiskID: "etcd"}, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + expectedError: `^test-path\.diskSetup\.etcd: Invalid value: "etcd:\\n platformDiskID: etcd\\ntype: etcd\\n": cannot specify etcd on worker machine pools$`, + }, + { + name: "valid etcd disk on master machine pool", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("master") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "etcd", + UserDefined: nil, + Etcd: &types.DiskEtcd{PlatformDiskID: "etcd"}, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + valid: true, + }, + { + name: "invalid etcd disk with nil Etcd field", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("master") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "etcd", + UserDefined: nil, + Etcd: nil, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + expectedError: `^test-path\.diskSetup\.etcd: Invalid value: "type: etcd\\n": etcd configuration must be created$`, + }, + { + name: "valid swap disk", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("worker") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "swap", + UserDefined: nil, + Etcd: nil, + Swap: &types.DiskSwap{PlatformDiskID: "swap"}, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + valid: true, + }, + { + name: "invalid swap disk with nil Swap field", + platform: 
&types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("worker") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "swap", + UserDefined: nil, + Etcd: nil, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + expectedError: `^test-path\.diskSetup\.swap: Invalid value: "type: swap\\n": swap configuration must be created$`, + }, + { + name: "valid user-defined disk", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("worker") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "user-defined", + UserDefined: &types.DiskUserDefined{PlatformDiskID: "userdisk", MountPath: "/mnt/data"}, + Etcd: nil, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + valid: true, + }, + { + name: "invalid user-defined disk platformDiskId too long", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("worker") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "user-defined", + UserDefined: &types.DiskUserDefined{PlatformDiskID: "userdiskuserdisk", MountPath: "/mnt/data"}, + Etcd: nil, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + expectedError: `^test-path\.diskSetup\.userDefined\.platformDiskId: Invalid value: \"type: user-defined\\nuserDefined:\\n mountPath: /mnt/data\\n platformDiskID: userdiskuserdisk\\n": cannot be longer than 12 characters$`, + }, + { + name: "invalid user-defined disk with nil UserDefined field", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("worker") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "user-defined", + UserDefined: nil, + Etcd: nil, + 
Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + expectedError: `^test-path\.diskSetup\.userDefined: Invalid value: "type: user-defined\\n": userDefined configuration must be created$`, + }, + { + name: "invalid multiple etcd disks", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("master") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "etcd", + UserDefined: nil, + Etcd: &types.DiskEtcd{PlatformDiskID: "etcd1"}, + Swap: nil, + }) + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "etcd", + UserDefined: nil, + Etcd: &types.DiskEtcd{PlatformDiskID: "etcd2"}, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + expectedError: `^test-path\.diskSetup\.etcd: Too many: 2: must have at most 1 items$`, + }, + { + name: "invalid multiple swap disks", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("worker") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "swap", + UserDefined: nil, + Etcd: nil, + Swap: &types.DiskSwap{PlatformDiskID: "swap1"}, + }) + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "swap", + UserDefined: nil, + Etcd: nil, + Swap: &types.DiskSwap{PlatformDiskID: "swap2"}, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + expectedError: `^test-path\.diskSetup\.swap: Too many: 2: must have at most 1 items$`, + }, + { + name: "valid mixed disk types", + platform: &types.Platform{Azure: &azure.Platform{Region: "eastus"}}, + pool: func() *types.MachinePool { + p := validMachinePool("master") + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: "etcd", + UserDefined: nil, + Etcd: &types.DiskEtcd{PlatformDiskID: "etcd"}, + Swap: nil, + }) + p.DiskSetup = append(p.DiskSetup, types.Disk{ + Type: 
"user-defined", + UserDefined: &types.DiskUserDefined{PlatformDiskID: "userdisk", MountPath: "/mnt/data"}, + Etcd: nil, + Swap: nil, + }) + p.Platform = types.MachinePoolPlatform{ + Azure: &azure.MachinePool{}, + } + return p + }(), + valid: true, + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { err := ValidateMachinePool(tc.platform, tc.pool, field.NewPath("test-path")).ToAggregate() - if tc.valid { + + switch { + case tc.expectedError != "": + assert.Regexp(t, tc.expectedError, err) + case tc.valid: assert.NoError(t, err) - } else { + case !tc.valid: assert.Error(t, err) } }) diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 38322b95d54..327ce13da38 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -155,6 +155,7 @@ type APIServerServingCerts struct { // the defaultServingCertificate will be used. // +optional // +listType=atomic + // +kubebuilder:validation:MaxItems=32 NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"` } @@ -165,6 +166,7 @@ type APIServerNamedServingCert struct { // Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. // +optional // +listType=atomic + // +kubebuilder:validation:MaxItems=64 Names []string `json:"names,omitempty"` // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. 
// The secret must exist in the openshift-config namespace and contain the following required fields: diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index 02c586b323c..f91c9cbf0bc 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -224,11 +224,11 @@ type OIDCProvider struct { // +optional OIDCClients []OIDCClientConfig `json:"oidcClients"` - // claimMappings is an optional field that configures the rules to be used by + // claimMappings is a required field that configures the rules to be used by // the Kubernetes API server for translating claims in a JWT token, issued // by the identity provider, to a cluster identity. // - // +optional + // +required ClaimMappings TokenClaimMappings `json:"claimMappings"` // claimValidationRules is an optional field that configures the rules to @@ -284,11 +284,11 @@ type TokenIssuer struct { } type TokenClaimMappings struct { - // username is an optional field that configures how the username of a cluster identity + // username is a required field that configures how the username of a cluster identity // should be constructed from the claims in a JWT token issued by the identity provider. // - // +optional - Username UsernameClaimMapping `json:"username,omitempty"` + // +required + Username UsernameClaimMapping `json:"username"` // groups is an optional field that configures how the groups of a cluster identity // should be constructed from the claims in a JWT token issued @@ -607,7 +607,16 @@ type OIDCClientReference struct { // +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? 
(has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" // +union type UsernameClaimMapping struct { - TokenClaimMapping `json:",inline"` + // claim is a required field that configures the JWT token + // claim whose value is assigned to the cluster identity + // field associated with this mapping. + // + // claim must not be an empty string ("") and must not exceed 256 characters. + // + // +required + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=256 + Claim string `json:"claim"` // prefixPolicy is an optional field that configures how a prefix should be // applied to the value of the JWT claim specified in the 'claim' field. diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go new file mode 100644 index 00000000000..ca604e05c5b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go @@ -0,0 +1,87 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImagePolicy holds cluster-wide configuration for image signature verification +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification +// +openshift:compatibility-gen:level=1 +type ClusterImagePolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata"` + + // spec contains the configuration for the cluster image policy. + // +required + Spec ClusterImagePolicySpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ClusterImagePolicyStatus `json:"status"` +} + +// CLusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource. +type ClusterImagePolicySpec struct { + // scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). + // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. 
+ // This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + // For additional details about the format, please refer to the document explaining the docker transport field, + // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + // +required + // +kubebuilder:validation:MaxItems=256 + // +listType=set + Scopes []ImageScope `json:"scopes"` + // policy is a required field that contains configuration to allow scopes to be verified, and defines how + // images not matching the verification policy will be treated. + // +required + Policy Policy `json:"policy"` +} + +// +k8s:deepcopy-gen=true +type ClusterImagePolicyStatus struct { + // conditions provide details on the status of this API Resource. + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterImagePolicyList is a list of ClusterImagePolicy resources +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type ClusterImagePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata"` + + // items is a list of ClusterImagePolices + // +kubebuilder:validation:MaxItems=1000 + // +required + Items []ClusterImagePolicy `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 092bebff09b..b89d487ca4c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -83,8 +83,8 @@ type ClusterVersionSpec struct { // // +optional Upstream URL `json:"upstream,omitempty"` - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be + // channel is an identifier for explicitly requesting a non-default set + // of updates to be applied to this cluster. The default channel will // contain stable updates that are appropriate for production clusters. // // +optional diff --git a/vendor/github.com/openshift/api/config/v1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_policy.go new file mode 100644 index 00000000000..54bd21adb4e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_policy.go @@ -0,0 +1,322 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicy holds namespace-wide configuration for image signature verification +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +kubebuilder:object:root=true +// +kubebuilder:resource:path=imagepolicies,scope=Namespaced +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=SigstoreImageVerification +// +openshift:compatibility-gen:level=1 +type ImagePolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata"` + + // spec holds user settable values for configuration + // +required + Spec ImagePolicySpec `json:"spec"` + // status contains the observed state of the resource. + // +optional + Status ImagePolicyStatus `json:"status"` +} + +// ImagePolicySpec is the specification of the ImagePolicy CRD. +type ImagePolicySpec struct { + // scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + // Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). + // More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + // namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + // Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + // This support no more than 256 scopes in one object. 
If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + // For additional details about the format, please refer to the document explaining the docker transport field, + // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + // +required + // +kubebuilder:validation:MaxItems=256 + // +listType=set + Scopes []ImageScope `json:"scopes"` + // policy is a required field that contains configuration to allow scopes to be verified, and defines how + // images not matching the verification policy will be treated. + // +required + Policy Policy `json:"policy"` +} + +// +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'" +// +kubebuilder:validation:XValidation:rule=`self.contains('*') ? self.matches('^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$') : true`,message="invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching" +// +kubebuilder:validation:XValidation:rule=`!self.contains('*') ? 
self.matches('^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$') : true`,message="invalid repository namespace or image specification in the image scope" +// +kubebuilder:validation:MaxLength=512 +type ImageScope string + +// Policy defines the verification policy for the items in the scopes list. +type Policy struct { + // rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. + // This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated. + // +required + RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"` + // signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. + // The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". + // +optional + SignedIdentity *PolicyIdentity `json:"signedIdentity,omitempty"` +} + +// PolicyRootOfTrust defines the root of trust based on the selected policyType. +// +union +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? 
has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=SigstoreImageVerificationPKI,rule="has(self.policyType) && self.policyType == 'PKI' ? has(self.pki) : !has(self.pki)",message="pki is required when policyType is PKI, and forbidden otherwise" +type PolicyRootOfTrust struct { + // policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. + // Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI". + // When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. + // When set to "FulcioCAWithRekor", the policy is based on the Fulcio certification and incorporates a Rekor verification. + // When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. + // +unionDiscriminator + // +required + PolicyType PolicyType `json:"policyType"` + // publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. + // publicKey is required when policyType is PublicKey, and forbidden otherwise. + // +optional + PublicKey *PublicKey `json:"publicKey,omitempty"` + // fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. 
+ // fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise + // For more information about Fulcio and Rekor, please refer to the document at: + // https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor + // +optional + FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"` + // pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. + // pki is required when policyType is PKI, and forbidden otherwise. + // +optional + // +openshift:enable:FeatureGate=SigstoreImageVerificationPKI + PKI *PKI `json:"pki,omitempty"` +} + +// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor +// +openshift:validation:FeatureGateAwareEnum:featureGate=SigstoreImageVerificationPKI,enum=PublicKey;FulcioCAWithRekor;PKI +type PolicyType string + +const ( + PublicKeyRootOfTrust PolicyType = "PublicKey" + FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor" + PKIRootOfTrust PolicyType = "PKI" +) + +// PublicKey defines the root of trust based on a sigstore public key. +type PublicKey struct { + // keyData is a required field contains inline base64-encoded data for the PEM format public key. + // keyData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=68 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the keyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the keyData must end with base64 encoding of '-----END PUBLIC KEY-----'." 
+ KeyData []byte `json:"keyData"` + // rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. + // +optional + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'." + RekorKeyData []byte `json:"rekorKeyData,omitempty"` +} + +// FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. +type FulcioCAWithRekor struct { + // fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. + // fulcioCAData must be at most 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the fulcioCAData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the fulcioCAData must end with base64 encoding of '-----END CERTIFICATE-----'." + FulcioCAData []byte `json:"fulcioCAData"` + // rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. + // rekorKeyData must be at most 8192 characters. 
+ // +required
+ // +kubebuilder:validation:MaxLength=8192
+ // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'."
+ // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'."
+ RekorKeyData []byte `json:"rekorKeyData"`
+ // fulcioSubject is a required field specifies OIDC issuer and the email of the Fulcio authentication configuration.
+ // +required
+ FulcioSubject PolicyFulcioSubject `json:"fulcioSubject"`
+}
+
+// PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.
+type PolicyFulcioSubject struct {
+ // oidcIssuer is a required field contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length.
+ // It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL.
+ // When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token.
+ // Example: "https://expected.OIDC.issuer/"
+ // +required
+ // +kubebuilder:validation:MaxLength=2048
+ // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL"
+ OIDCIssuer string `json:"oidcIssuer"`
+ // signedEmail is a required field holds the email address that the Fulcio certificate is issued for.
+ // The signedEmail must be a valid email address and at most 320 characters in length.
+ // Example: "expected-signing-user@example.com" + // +required + // +kubebuilder:validation:MaxLength=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + SignedEmail string `json:"signedEmail"` +} + +// PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates. +type PKI struct { + // caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. + // +required + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=72 + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caRootsData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caRootsData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caRootsData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." + CertificateAuthorityRootsData []byte `json:"caRootsData"` + // caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. + // caIntermediatesData requires caRootsData to be set. 
+ // +optional + // +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caIntermediatesData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caIntermediatesData must end with base64 encoding of '-----END CERTIFICATE-----'." + // +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caIntermediatesData must be base64 encoding of valid PEM format data contain the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers." + // +kubebuilder:validation:MaxLength=8192 + // +kubebuilder:validation:MinLength=72 + CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"` + + // pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued. + // +required + PKICertificateSubject PKICertificateSubject `json:"pkiCertificateSubject"` +} + +// PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued. +// +kubebuilder:validation:XValidation:rule="has(self.email) || has(self.hostname)", message="at least one of email or hostname must be set in pkiCertificateSubject" +// +openshift:enable:FeatureGate=SigstoreImageVerificationPKI +type PKICertificateSubject struct { + // email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. + // The email must be a valid email address and at most 320 characters in length. 
+ // +optional + // +kubebuilder:validation:MaxLength:=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + Email string `json:"email,omitempty"` + // hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. + // The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. + // It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk. + // +optional + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="self.startsWith('*.') ? !format.dns1123Subdomain().validate(self.replace('*.', '', 1)).hasValue() : !format.dns1123Subdomain().validate(self).hasValue()",message="hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.'. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk." + Hostname string `json:"hostname,omitempty"` +} + +// PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact". +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? 
has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise"
+// +union
+type PolicyIdentity struct {
+ // matchPolicy is a required field specifies matching strategy to verify the image identity in the signature against the image scope.
+ // Allowed values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact".
+ // When set to "MatchRepoDigestOrExact", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity.
+ // When set to "MatchRepository", the identity in the signature must be in the same repository as the image identity.
+ // When set to "ExactRepository", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by "repository".
+ // When set to "RemapIdentity", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the image identity matches the specified remapPrefix.
+ // +unionDiscriminator
+ // +required
+ MatchPolicy IdentityMatchPolicy `json:"matchPolicy"`
+ // exactRepository specifies the repository that must be exactly matched by the identity in the signature.
+ // exactRepository is required if matchPolicy is set to "ExactRepository". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.
+ // +optional
+ PolicyMatchExactRepository *PolicyMatchExactRepository `json:"exactRepository,omitempty"`
+ // remapIdentity specifies the prefix remapping rule for verifying image identity.
+ // remapIdentity is required if matchPolicy is set to "RemapIdentity". It is used to verify that the signature claims a different registry/repository prefix than the original image. + // +optional + PolicyMatchRemapIdentity *PolicyMatchRemapIdentity `json:"remapIdentity,omitempty"` +} + +// +kubebuilder:validation:MaxLength=512 +// +kubebuilder:validation:XValidation:rule=`self.matches('.*:([\\w][\\w.-]{0,127})$')? self.matches('^(localhost:[0-9]+)$'): true`,message="invalid repository or prefix in the signedIdentity, should not include the tag or digest" +// +kubebuilder:validation:XValidation:rule=`self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')`,message="invalid repository or prefix in the signedIdentity. The repository or prefix must starts with 'localhost' or a valid '.' separated domain. If contains registry paths, the path component names must start with at least one letter or number, with following parts able to be separated by one period, one or two underscore and multiple dashes." +type IdentityRepositoryPrefix string + +type PolicyMatchExactRepository struct { + // repository is the reference of the image identity to be matched. + // repository is required if matchPolicy is set to "ExactRepository". + // The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox + // +required + Repository IdentityRepositoryPrefix `json:"repository"` +} + +type PolicyMatchRemapIdentity struct { + // prefix is required if matchPolicy is set to "RemapIdentity". + // prefix is the prefix of the image identity to be matched. 
+ // If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). + // This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. + // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + Prefix IdentityRepositoryPrefix `json:"prefix"` + // signedPrefix is required if matchPolicy is set to "RemapIdentity". + // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + // +required + SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"` +} + +// IdentityMatchPolicy defines the type of matching for "matchPolicy". 
+// +kubebuilder:validation:Enum=MatchRepoDigestOrExact;MatchRepository;ExactRepository;RemapIdentity +type IdentityMatchPolicy string + +const ( + IdentityMatchPolicyMatchRepoDigestOrExact IdentityMatchPolicy = "MatchRepoDigestOrExact" + IdentityMatchPolicyMatchRepository IdentityMatchPolicy = "MatchRepository" + IdentityMatchPolicyExactRepository IdentityMatchPolicy = "ExactRepository" + IdentityMatchPolicyRemapIdentity IdentityMatchPolicy = "RemapIdentity" +) + +// +k8s:deepcopy-gen=true +type ImagePolicyStatus struct { + // conditions provide details on the status of this API Resource. + // condition type 'Pending' indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePolicyList is a list of ImagePolicy resources +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImagePolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata"` + + // items is a list of ImagePolicies + // +kubebuilder:validation:MaxItems=1000 + // +required + Items []ImagePolicy `json:"items"` +} + +const ( + // ImagePolicyPending indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. 
+ ImagePolicyPending = "Pending" + // ImagePolicyApplied indicates that the policy has been applied + ImagePolicyApplied = "Applied" +) diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 38aa2f6f331..70edc176996 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -1024,6 +1024,112 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicy. +func (in *ClusterImagePolicy) DeepCopy() *ClusterImagePolicy { + if in == nil { + return nil + } + out := new(ClusterImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterImagePolicyList) DeepCopyInto(out *ClusterImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyList. +func (in *ClusterImagePolicyList) DeepCopy() *ClusterImagePolicyList { + if in == nil { + return nil + } + out := new(ClusterImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicySpec) DeepCopyInto(out *ClusterImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicySpec. +func (in *ClusterImagePolicySpec) DeepCopy() *ClusterImagePolicySpec { + if in == nil { + return nil + } + out := new(ClusterImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyStatus. +func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus { + if in == nil { + return nil + } + out := new(ClusterImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { *out = *in @@ -2229,6 +2335,33 @@ func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FulcioCAWithRekor) DeepCopyInto(out *FulcioCAWithRekor) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulcioCAWithRekor. +func (in *FulcioCAWithRekor) DeepCopy() *FulcioCAWithRekor { + if in == nil { + return nil + } + out := new(FulcioCAWithRekor) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { *out = *in @@ -2922,6 +3055,112 @@ func (in *ImageList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicy) DeepCopyInto(out *ImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicy. +func (in *ImagePolicy) DeepCopy() *ImagePolicy { + if in == nil { + return nil + } + out := new(ImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyList. +func (in *ImagePolicyList) DeepCopy() *ImagePolicyList { + if in == nil { + return nil + } + out := new(ImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicySpec. +func (in *ImagePolicySpec) DeepCopy() *ImagePolicySpec { + if in == nil { + return nil + } + out := new(ImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyStatus. +func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { + if in == nil { + return nil + } + out := new(ImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in @@ -4732,6 +4971,49 @@ func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PKI) DeepCopyInto(out *PKI) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKI. +func (in *PKI) DeepCopy() *PKI { + if in == nil { + return nil + } + out := new(PKI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKICertificateSubject. +func (in *PKICertificateSubject) DeepCopy() *PKICertificateSubject { + if in == nil { + return nil + } + out := new(PKICertificateSubject) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { *out = *in @@ -4904,6 +5186,133 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + if in.SignedIdentity != nil { + in, out := &in.SignedIdentity, &out.SignedIdentity + *out = new(PolicyIdentity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyFulcioSubject. +func (in *PolicyFulcioSubject) DeepCopy() *PolicyFulcioSubject { + if in == nil { + return nil + } + out := new(PolicyFulcioSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyIdentity) DeepCopyInto(out *PolicyIdentity) { + *out = *in + if in.PolicyMatchExactRepository != nil { + in, out := &in.PolicyMatchExactRepository, &out.PolicyMatchExactRepository + *out = new(PolicyMatchExactRepository) + **out = **in + } + if in.PolicyMatchRemapIdentity != nil { + in, out := &in.PolicyMatchRemapIdentity, &out.PolicyMatchRemapIdentity + *out = new(PolicyMatchRemapIdentity) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyIdentity. +func (in *PolicyIdentity) DeepCopy() *PolicyIdentity { + if in == nil { + return nil + } + out := new(PolicyIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyMatchExactRepository) DeepCopyInto(out *PolicyMatchExactRepository) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchExactRepository. +func (in *PolicyMatchExactRepository) DeepCopy() *PolicyMatchExactRepository { + if in == nil { + return nil + } + out := new(PolicyMatchExactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchRemapIdentity) DeepCopyInto(out *PolicyMatchRemapIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchRemapIdentity. +func (in *PolicyMatchRemapIdentity) DeepCopy() *PolicyMatchRemapIdentity { + if in == nil { + return nil + } + out := new(PolicyMatchRemapIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(PublicKey) + (*in).DeepCopyInto(*out) + } + if in.FulcioCAWithRekor != nil { + in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor + *out = new(FulcioCAWithRekor) + (*in).DeepCopyInto(*out) + } + if in.PKI != nil { + in, out := &in.PKI, &out.PKI + *out = new(PKI) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRootOfTrust. +func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { + if in == nil { + return nil + } + out := new(PolicyRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { *out = *in @@ -5204,6 +5613,32 @@ func (in *ProxyStatus) DeepCopy() *ProxyStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicKey) DeepCopyInto(out *PublicKey) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicKey. +func (in *PublicKey) DeepCopy() *PublicKey { + if in == nil { + return nil + } + out := new(PublicKey) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { *out = *in @@ -5902,7 +6337,6 @@ func (in *UpdateHistory) DeepCopy() *UpdateHistory { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { *out = *in - out.TokenClaimMapping = in.TokenClaimMapping if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(UsernamePrefix) diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index a681631cf6d..19a304c17bf 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -66,6 +66,30 @@ builds.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + clusteroperators.config.openshift.io: Annotations: include.release.openshift.io/self-managed-high-availability: "true" @@ -282,6 +306,30 @@ imagedigestmirrorsets.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + 
HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + imagetagmirrorsets.config.openshift.io: Annotations: release.openshift.io/bootstrap-required: "true" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 002ea77f318..eb78ad7ca66 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -447,7 +447,7 @@ var map_OIDCProvider = map[string]string{ "name": "name is a required field that configures the unique human-readable identifier associated with the identity provider. It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics.\n\nname must not be an empty string (\"\").", "issuer": "issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server.", "oidcClients": "oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. 
oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs.", - "claimMappings": "claimMappings is an optional field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity.", + "claimMappings": "claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity.", "claimValidationRules": "claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider.\n\nValidation rules are joined via an AND operation.", } @@ -474,7 +474,7 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMappings = map[string]string{ - "username": "username is an optional field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", + "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). For example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. 
When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. The current default is to use the 'sub' claim.", "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 64 extra attribute mappings may be provided.", @@ -523,6 +523,7 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string { } var map_UsernameClaimMapping = map[string]string{ + "claim": "claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. The prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. 
As an example, consider the following scenario:\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", "prefix": "prefix configures the prefix that should be prepended to the value of the JWT claim.\n\nprefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.", } @@ -610,6 +611,45 @@ func (ImageLabel) SwaggerDoc() map[string]string { return map_ImageLabel } +var map_ClusterImagePolicy = map[string]string{ + "": "ClusterImagePolicy holds cluster-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the configuration for the cluster image policy.", + "status": "status contains the observed state of the resource.", +} + +func (ClusterImagePolicy) SwaggerDoc() map[string]string { + return map_ClusterImagePolicy +} + +var map_ClusterImagePolicyList = map[string]string{ + "": "ClusterImagePolicyList is a list of ClusterImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterImagePolicies", +} + +func (ClusterImagePolicyList) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyList +} + +var map_ClusterImagePolicySpec = map[string]string{ + "": "ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. 
If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ClusterImagePolicySpec) SwaggerDoc() map[string]string { + return map_ClusterImagePolicySpec +} + +var map_ClusterImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource.", +} + +func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyStatus +} + var map_ClusterOperator = map[string]string{ "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -739,7 +779,7 @@ var map_ClusterVersionSpec = map[string]string{ "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). 
The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", - "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", + "channel": "channel is an identifier for explicitly requesting a non-default set of updates to be applied to this cluster. 
The default channel will contain stable updates that are appropriate for production clusters.", "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", "signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.", "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", @@ -1174,6 +1214,147 @@ func (ImageDigestMirrors) SwaggerDoc() map[string]string { return map_ImageDigestMirrors } +var map_FulcioCAWithRekor = map[string]string{ + "": "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.", + "fulcioCAData": "fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. fulcioCAData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. 
", + "fulcioSubject": "fulcioSubject is a required field specifies OIDC issuer and the email of the Fulcio authentication configuration.", +} + +func (FulcioCAWithRekor) SwaggerDoc() map[string]string { + return map_FulcioCAWithRekor +} + +var map_ImagePolicy = map[string]string{ + "": "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", +} + +func (ImagePolicy) SwaggerDoc() map[string]string { + return map_ImagePolicy +} + +var map_ImagePolicyList = map[string]string{ + "": "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ImagePolicies", +} + +func (ImagePolicyList) SwaggerDoc() map[string]string { + return map_ImagePolicyList +} + +var map_ImagePolicySpec = map[string]string{ + "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). 
More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ImagePolicySpec) SwaggerDoc() map[string]string { + return map_ImagePolicySpec +} + +var map_ImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource. 
condition type 'Pending' indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.", +} + +func (ImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ImagePolicyStatus +} + +var map_PKI = map[string]string{ + "": "PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.", + "caRootsData": "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters. ", + "caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set. ", + "pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", +} + +func (PKI) SwaggerDoc() map[string]string { + return map_PKI +} + +var map_PKICertificateSubject = map[string]string{ + "": "PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.", + "email": "email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. The email must be a valid email address and at most 320 characters in length.", + "hostname": "hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. 
It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.", +} + +func (PKICertificateSubject) SwaggerDoc() map[string]string { + return map_PKICertificateSubject +} + +var map_Policy = map[string]string{ + "": "Policy defines the verification policy for the items in the scopes list.", + "rootOfTrust": "rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.", + "signedIdentity": "signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is \"MatchRepoDigestOrExact\".", +} + +func (Policy) SwaggerDoc() map[string]string { + return map_Policy +} + +var map_PolicyFulcioSubject = map[string]string{ + "": "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.", + "oidcIssuer": "oidcIssuer is a required filed contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. 
Example: \"https://expected.OIDC.issuer/\"", + "signedEmail": "signedEmail is a required field holds the email address that the Fulcio certificate is issued for. The signedEmail must be a valid email address and at most 320 characters in length. Example: \"expected-signing-user@example.com\"", +} + +func (PolicyFulcioSubject) SwaggerDoc() map[string]string { + return map_PolicyFulcioSubject +} + +var map_PolicyIdentity = map[string]string{ + "": "PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is \"MatchRepoDigestOrExact\".", + "matchPolicy": "matchPolicy is a required filed specifies matching strategy to verify the image identity in the signature against the image scope. Allowed values are \"MatchRepoDigestOrExact\", \"MatchRepository\", \"ExactRepository\", \"RemapIdentity\". When omitted, the default value is \"MatchRepoDigestOrExact\". When set to \"MatchRepoDigestOrExact\", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. When set to \"MatchRepository\", the identity in the signature must be in the same repository as the image identity. When set to \"ExactRepository\", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by \"repository\". When set to \"RemapIdentity\", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the \"prefix\" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix.", + "exactRepository": "exactRepository specifies the repository that must be exactly matched by the identity in the signature. exactRepository is required if matchPolicy is set to \"ExactRepository\". 
It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.", + "remapIdentity": "remapIdentity specifies the prefix remapping rule for verifying image identity. remapIdentity is required if matchPolicy is set to \"RemapIdentity\". It is used to verify that the signature claims a different registry/repository prefix than the original image.", +} + +func (PolicyIdentity) SwaggerDoc() map[string]string { + return map_PolicyIdentity +} + +var map_PolicyMatchExactRepository = map[string]string{ + "repository": "repository is the reference of the image identity to be matched. repository is required if matchPolicy is set to \"ExactRepository\". The value should be a repository name (by omitting the tag or digest) in a registry implementing the \"Docker Registry HTTP API V2\". For example, docker.io/library/busybox", +} + +func (PolicyMatchExactRepository) SwaggerDoc() map[string]string { + return map_PolicyMatchExactRepository +} + +var map_PolicyMatchRemapIdentity = map[string]string{ + "prefix": "prefix is required if matchPolicy is set to \"RemapIdentity\". prefix is the prefix of the image identity to be matched. If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. 
For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", + "signedPrefix": "signedPrefix is required if matchPolicy is set to \"RemapIdentity\". signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as \"prefix\". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.", +} + +func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { + return map_PolicyMatchRemapIdentity +} + +var map_PolicyRootOfTrust = map[string]string{ + "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", + "policyType": "policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. Allowed values are \"PublicKey\", \"FulcioCAWithRekor\", and \"PKI\". When set to \"PublicKey\", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. When set to \"FulcioCAWithRekor\", the policy is based on the Fulcio certification and incorporates a Rekor verification. When set to \"PKI\", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", + "publicKey": "publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. 
publicKey is required when policyType is PublicKey, and forbidden otherwise.", + "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", + "pki": "pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. pki is required when policyType is PKI, and forbidden otherwise.", +} + +func (PolicyRootOfTrust) SwaggerDoc() map[string]string { + return map_PolicyRootOfTrust +} + +var map_PublicKey = map[string]string{ + "": "PublicKey defines the root of trust based on a sigstore public key.", + "keyData": "keyData is a required field contains inline base64-encoded data for the PEM format public key. keyData must be at most 8192 characters. ", + "rekorKeyData": "rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. rekorKeyData must be at most 8192 characters. ", +} + +func (PublicKey) SwaggerDoc() map[string]string { + return map_PublicKey +} + var map_ImageTagMirrorSet = map[string]string{ "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index 48691d00360..9ff328a89b7 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -52,12 +52,12 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy"). + FeatureGateMutatingAdmissionPolicy = newFeatureGate("MutatingAdmissionPolicy"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("benluddy"). productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/3488"). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3962"). + enableIn(). mustRegister() FeatureGateGatewayAPI = newFeatureGate("GatewayAPI"). @@ -132,6 +132,14 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateAzureDedicatedHosts = newFeatureGate("AzureDedicatedHosts"). + reportProblemsToJiraComponent("installer"). + contactPerson("rvanderp3"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1783"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet"). reportProblemsToJiraComponent("apps"). contactPerson("atiratree"). @@ -147,14 +155,6 @@ var ( enhancementPR("https://github.com/kubernetes/enhancements/issues/3386"). mustRegister() - FeatureGatePrivateHostedZoneAWS = newFeatureGate("PrivateHostedZoneAWS"). - reportProblemsToJiraComponent("Routing"). - contactPerson("miciah"). 
- productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification"). reportProblemsToJiraComponent("node"). contactPerson("sgrunert"). @@ -248,7 +248,7 @@ var ( contactPerson("jcaamano"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). @@ -316,12 +316,20 @@ var ( FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes"). reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("cdoern"). + contactPerson("ijanssen"). productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1765"). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateImageModeStatusReporting = newFeatureGate("ImageModeStatusReporting"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("ijanssen"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1809"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall"). reportProblemsToJiraComponent("Installer"). contactPerson("vincepri"). @@ -377,6 +385,22 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateManagedBootImagesAzure = newFeatureGate("ManagedBootImagesAzure"). 
+ reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("djoshy"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1761"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateBootImageSkewEnforcement = newFeatureGate("BootImageSkewEnforcement"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("djoshy"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1761"). + enableIn(configv1.DevPreviewNoUpgrade). + mustRegister() + FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild"). reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("cheesesashimi"). @@ -411,10 +435,10 @@ var ( FeatureGatePinnedImages = newFeatureGate("PinnedImages"). reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("jhernand"). + contactPerson("RishabhSaini"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus"). @@ -483,14 +507,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade). mustRegister() - FeatureGatePlatformOperators = newFeatureGate("PlatformOperators"). - reportProblemsToJiraComponent("olm"). - contactPerson("joe"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateNewOLM = newFeatureGate("NewOLM"). reportProblemsToJiraComponent("olm"). contactPerson("joe"). @@ -547,14 +563,6 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy"). 
- reportProblemsToJiraComponent("MachineConfigOperator"). - contactPerson("jerzhang"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles"). reportProblemsToJiraComponent("Monitoring"). contactPerson("rexagod"). @@ -791,6 +799,22 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateStoragePerformantSecurityPolicy = newFeatureGate("StoragePerformantSecurityPolicy"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("hekumar"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1804"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMultiDiskSetup = newFeatureGate("MultiDiskSetup"). + reportProblemsToJiraComponent("splat"). + contactPerson("jcpowermac"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1805"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateAWSDedicatedHosts = newFeatureGate("AWSDedicatedHosts"). reportProblemsToJiraComponent("Installer"). contactPerson("faermanj"). @@ -806,4 +830,20 @@ var ( enhancementPR("https://github.com/openshift/enhancements/pull/1772"). enableIn(configv1.DevPreviewNoUpgrade). mustRegister() + + FeatureGatePreconfiguredUDNAddresses = newFeatureGate("PreconfiguredUDNAddresses"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("kyrtapz"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1793"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
+ mustRegister() + + FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup"). + reportProblemsToJiraComponent("Cloud Compute / Cloud Controller Manager"). + contactPerson("mtulio"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1802"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() ) diff --git a/vendor/github.com/openshift/api/features/legacyfeaturegates.go b/vendor/github.com/openshift/api/features/legacyfeaturegates.go index 2a74f8e8b84..67572c31ca4 100644 --- a/vendor/github.com/openshift/api/features/legacyfeaturegates.go +++ b/vendor/github.com/openshift/api/features/legacyfeaturegates.go @@ -91,8 +91,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "NewOLM", // never add to this list, if you think you have an exception ask @deads2k - "NodeDisruptionPolicy", - // never add to this list, if you think you have an exception ask @deads2k "OVNObservability", // never add to this list, if you think you have an exception ask @deads2k "OnClusterBuild", @@ -101,8 +99,6 @@ var legacyFeatureGates = sets.New( // never add to this list, if you think you have an exception ask @deads2k "PinnedImages", // never add to this list, if you think you have an exception ask @deads2k - "PlatformOperators", - // never add to this list, if you think you have an exception ask @deads2k "PrivateHostedZoneAWS", // never add to this list, if you think you have an exception ask @deads2k "RouteAdvertisements", diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index d69bcd02337..db15df2cc49 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -140,8 +140,10 @@ type BlockDeviceMappingSpec 
struct { // https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice type EBSBlockDeviceSpec struct { // Indicates whether the EBS volume is deleted on machine termination. + // + // Deprecated: setting this field has no effect. // +optional - DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + DeprecatedDeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes // may only be attached to machines that support Amazon EBS encryption. // +optional diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index ef8f1a55fe8..7763435a9e9 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -518,8 +518,8 @@ func (in *DiskSettings) DeepCopy() *DiskSettings { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { *out = *in - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + if in.DeprecatedDeleteOnTermination != nil { + in, out := &in.DeprecatedDeleteOnTermination, &out.DeprecatedDeleteOnTermination *out = new(bool) **out = **in } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index b2e55376fee..2667a0aa24a 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -84,7 +84,7 @@ func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string { var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", - "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.", + "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", "encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.", "kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.", "iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. 
For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained. Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.", diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index 4c53734d86d..2d88bcd7701 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -53,7 +53,6 @@ type MachineConfigurationSpec struct { // MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow // for less downtime when doing small configuration updates to the cluster. This configuration // has no effect on cluster upgrades which will still incur node disruption where required. - // +openshift:enable:FeatureGate=NodeDisruptionPolicy // +optional NodeDisruptionPolicy NodeDisruptionPolicyConfig `json:"nodeDisruptionPolicy"` } @@ -94,7 +93,6 @@ type MachineConfigurationStatus struct { // nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, // and will be used by the Machine Config Daemon during future node updates. 
- // +openshift:enable:FeatureGate=NodeDisruptionPolicy // +optional NodeDisruptionPolicyStatus NodeDisruptionPolicyStatus `json:"nodeDisruptionPolicyStatus"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 713939ddbb9..111240eecff 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -431,16 +431,14 @@ type OVNKubernetesConfig struct { // v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // Default is 100.64.0.0/16 // +optional V4InternalSubnet string `json:"v4InternalSubnet,omitempty"` // v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // Default is fd98::/64 // +optional V6InternalSubnet string `json:"v6InternalSubnet,omitempty"` @@ -478,11 +476,10 @@ type IPv4OVNKubernetesConfig struct { // architecture that connects the cluster routers on each node together to enable // east west traffic. The subnet chosen should not overlap with other networks // specified for OVN-Kubernetes as well as other networks used on the host. - // The value cannot be changed after installation. 
// When ommitted, this means no opinion and the platform is left to choose a reasonable // default which is subject to change over time. // The current default subnet is 100.88.0.0/16 - // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -493,10 +490,9 @@ type IPv4OVNKubernetesConfig struct { // internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // The current default value is 100.64.0.0/16 - // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -512,10 +508,9 @@ type IPv6OVNKubernetesConfig struct { // architecture that connects the cluster routers on each node together to enable // east west traffic. The subnet chosen should not overlap with other networks // specified for OVN-Kubernetes as well as other networks used on the host. - // The value cannot be changed after installation. // When ommitted, this means no opinion and the platform is left to choose a reasonable // default which is subject to change over time. 
- // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The current default subnet is fd97::/64 // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted @@ -527,9 +522,8 @@ type IPv6OVNKubernetesConfig struct { // internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. - // The subnet must be large enough to accomadate one IP per node in your cluster + // subnet must be larger than the number of nodes. + // The subnet must be large enough to accommodate one IP per node in your cluster // The current default value is fd98::/64 // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted @@ -646,7 +640,7 @@ type IPv4GatewayConfig struct { // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must // be large enough to accommodate 6 IPs (maximum prefix length /29). // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. - // The current default subnet is 169.254.169.0/29 + // The current default subnet is 169.254.0.0/17 // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -665,7 +659,7 @@ type IPv6GatewayConfig struct { // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must // be large enough to accommodate 6 IPs (maximum prefix length /125). 
// When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. - // The current default subnet is fd69::/125 + // The current default subnet is fd69::/112 // Note that IPV6 dual addresses are not permitted // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format" // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 2d9094e37fe..e9750a9242b 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -308,7 +308,6 @@ machineconfigurations.operator.openshift.io: Category: "" FeatureGates: - ManagedBootImages - - NodeDisruptionPolicy FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_80" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index a0fa4fe4756..582f9686ffd 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -1712,7 +1712,7 @@ func (IPsecFullModeConfig) SwaggerDoc() map[string]string { var map_IPv4GatewayConfig = map[string]string{ "": "IPV4GatewayConfig holds the configuration paramaters for IPV4 connections in the GatewayConfig for OVN-Kubernetes", - "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by 
ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format", + "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.0.0/17 The value must be in proper IPV4 CIDR format", } func (IPv4GatewayConfig) SwaggerDoc() map[string]string { @@ -1720,8 +1720,8 @@ func (IPv4GatewayConfig) SwaggerDoc() map[string]string { } var map_IPv4OVNKubernetesConfig = map[string]string{ - "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. 
The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format", - "internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The current default value is 100.64.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format", + "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16 The subnet must be large enough to accommodate one IP per node in your cluster The value must be in proper IPV4 CIDR format", + "internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. 
The size of the subnet must be larger than the number of nodes. The current default value is 100.64.0.0/16 The subnet must be large enough to accommodate one IP per node in your cluster The value must be in proper IPV4 CIDR format", } func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string { @@ -1730,7 +1730,7 @@ func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string { var map_IPv6GatewayConfig = map[string]string{ "": "IPV6GatewayConfig holds the configuration paramaters for IPV6 connections in the GatewayConfig for OVN-Kubernetes", - "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted", + "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). 
When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/112 Note that IPV6 dual addresses are not permitted", } func (IPv6GatewayConfig) SwaggerDoc() map[string]string { @@ -1738,8 +1738,8 @@ func (IPv6GatewayConfig) SwaggerDoc() map[string]string { } var map_IPv6OVNKubernetesConfig = map[string]string{ - "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accomadate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", - "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. 
The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", + "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accommodate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", + "internalJoinSubnet": "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The subnet must be large enough to accommodate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted", } func (IPv6OVNKubernetesConfig) SwaggerDoc() map[string]string { @@ -1840,8 +1840,8 @@ var map_OVNKubernetesConfig = map[string]string{ "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.", "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. 
If unset, reported defaults are used.", "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.", - "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16", - "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/64", + "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is 100.64.0.0/16", + "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is fd98::/64", "egressIPConfig": "egressIPConfig holds the configuration for EgressIP options.", "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. 
When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", diff --git a/vendor/modules.txt b/vendor/modules.txt index a44ace30c51..acc7392ec26 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1283,7 +1283,7 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runtime-spec v1.2.0 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/openshift/api v0.0.0-20250527072845-f5e205b58365 +# github.com/openshift/api v0.0.0-20250704153732-ad766c4e6d8e ## explicit; go 1.23.0 github.com/openshift/api/annotations github.com/openshift/api/config/v1