Skip to content

Commit

Permalink
NO-JIRA: perfprof: render: make ownerReference optional (#907)
Browse files Browse the repository at this point in the history
* perfprof: render: make ownerReference optional

When we set OwnerReference for a dependent object, we must
use the object UID the apiserver set. Apiserver owns this field
(obviously) so setting it when missing, like the code did in the past,
is wrong and will cause conflicts on updates.

In the pre-render flow we can't wait for the apiserver to set a UID,
so the only possible option is to not set any OwnerReference at all.

Signed-off-by: Francesco Romani <fromani@redhat.com>

* perfprof: render: add weak owner reference support

There are cases in which we can't use the real owner reference
in the render flow, because:
- we don't own the performanceprofile uid
- we can't wait to learn back the apiserver-decided uid because
  we need to render all the manifests at once.

So, to group objects together and to make it safe and easy to
rebuild the ownerReference graph later, we add a label
which serves as a weaker owner reference, using the profile name
and adding it to all the related objects.

Note this form of weak reference is meant to serve as a placeholder
for the real ownerReference, which should be added by the relevant
controller later on, once the cluster is started and running.

Signed-off-by: Francesco Romani <fromani@redhat.com>

---------

Signed-off-by: Francesco Romani <fromani@redhat.com>
(cherry picked from commit 0e31943)
  • Loading branch information
ffromani committed Feb 20, 2024
1 parent 01fc8d9 commit 5308f47
Show file tree
Hide file tree
Showing 39 changed files with 638 additions and 157 deletions.
16 changes: 14 additions & 2 deletions pkg/performanceprofile/cmd/render/cmd.go
Expand Up @@ -31,6 +31,7 @@ import (
// renderOpts holds the command-line options of the render command.
type renderOpts struct {
assetsInDir string // input path(s) for the assets; can be a comma-separated list of directories
assetsOutDir string // output path for the rendered manifests
ownerRefMode string // owner reference mode; one of: "none", "k8s", "label-name"
}

// NewRenderCommand creates a render command.
Expand All @@ -39,6 +40,7 @@ type renderOpts struct {
// needed to generate the machine configs.
func NewRenderCommand() *cobra.Command {
renderOpts := renderOpts{}
renderOpts.SetDefaults()

cmd := &cobra.Command{
Use: "render",
Expand All @@ -59,9 +61,14 @@ func NewRenderCommand() *cobra.Command {
return cmd
}

// SetDefaults assigns the default option values. It is called before
// AddFlags (see NewRenderCommand), so the flag defaults shown in the
// help text pick up these values.
func (r *renderOpts) SetDefaults() {
// label-name is the default: unlike the "k8s" mode, it does not require
// the apiserver-assigned profile UID, so it is safe in pre-render flows.
r.ownerRefMode = ownerRefModeLabelName
}

// AddFlags registers the render command flags on fs, using the current
// field values (set by SetDefaults) as flag defaults, then overlays any
// values provided through environment variables.
func (r *renderOpts) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&r.assetsInDir, "asset-input-dir", components.AssetsDir, "Input path for the assets directory. (Can be a comma separated list of directories.)")
fs.StringVar(&r.assetsOutDir, "asset-output-dir", r.assetsOutDir, "Output path for the rendered manifests.")
fs.StringVar(&r.ownerRefMode, "owner-ref", r.ownerRefMode, "Add Owner Reference to rendered manifests. Accepted values: 'none' to disable; 'k8s' for proper owner reference; 'label-name' to use just a label.")
// environment variables override the defaults set above (readFlagsFromEnv
// rewrites the bound fields before the command line is parsed)
r.readFlagsFromEnv()
}
Expand All @@ -74,18 +81,23 @@ func (r *renderOpts) readFlagsFromEnv() {
if assetsOutDir := os.Getenv("ASSET_OUTPUT_DIR"); len(assetsOutDir) > 0 {
r.assetsOutDir = assetsOutDir
}
if ownerRefMode, ok := os.LookupEnv("OWNER_REF"); ok {
r.ownerRefMode = ownerRefMode
}
}

// Validate checks that the configured options are usable: the owner
// reference mode must be one of the supported values, and an output
// directory must be provided.
func (r *renderOpts) Validate() error {
	switch {
	case !isValidOwnerRefMode(r.ownerRefMode):
		return fmt.Errorf("unsupported owner reference: %q", r.ownerRefMode)
	case len(r.assetsOutDir) == 0:
		return fmt.Errorf("asset-output-dir must be specified")
	default:
		return nil
	}
}

// Run executes the render flow with the configured options and returns
// any error encountered while rendering.
func (r *renderOpts) Run() error {
return render(r.ownerRefMode, r.assetsInDir, r.assetsOutDir)
}

func addKlogFlags(cmd *cobra.Command) {
Expand Down
111 changes: 83 additions & 28 deletions pkg/performanceprofile/cmd/render/render.go
Expand Up @@ -40,7 +40,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"

"k8s.io/klog"
)
Expand All @@ -49,6 +48,16 @@ const (
clusterConfigResourceName = "cluster"
)

// Accepted values for the --owner-ref flag (see renderOpts.ownerRefMode).
const (
ownerRefModeNone = "none"
ownerRefModeK8S = "k8s"
ownerRefModeLabelName = "label-name"
)

const (
// weakOwnerReferenceNameLabel is the label key used as a weak owner
// reference: it records the owning PerformanceProfile name so the real
// ownerReference can be rebuilt later by a controller (see commit notes).
weakOwnerReferenceNameLabel = "performance.openshift.io/weak-owner-reference-name"
)

var (
manifestScheme = runtime.NewScheme()
codecFactory serializer.CodecFactory
Expand All @@ -70,8 +79,8 @@ func init() {

// Render will traverse the input directory and generate the proper performance profile files
// in to the output dir based on PerformanceProfile manifests contained in the input directory.
func render(inputDir, outputDir string) error {
klog.Info("Rendering files into: ", outputDir)
func render(ownerRefMode, inputDir, outputDir string) error {
klog.Infof("Rendering files into: %s (ownerRefMode=%v)", outputDir, ownerRefMode)

// Read asset directory fileInfo
filePaths, err := util.ListFiles(inputDir)
Expand Down Expand Up @@ -191,45 +200,91 @@ func render(inputDir, outputDir string) error {
return err
}

uid := pp.UID
if uid == types.UID("") {
uid = uuid.NewUUID()
}

or := []v1.OwnerReference{
{
Kind: pp.Kind,
Name: pp.Name,
APIVersion: pp.APIVersion,
UID: uid,
},
}

for _, componentObj := range components.ToObjects() {
componentObj.SetOwnerReferences(or)
if ownerRefMode == ownerRefModeK8S {
err = addOwnerReference(components, pp)
if err != nil {
return err
}
} else if ownerRefMode == ownerRefModeLabelName {
err = addWeakOwnerReferenceLabel(components, pp)
if err != nil {
return err
}
err = writeObject(outputDir, fmt.Sprintf("%s_%s.yaml", pp.Name, "annotated"), pp)
if err != nil {
return err
}
}

for kind, manifest := range components.ToManifestTable() {
b, err := yaml.Marshal(manifest)
err = writeObject(outputDir, fmt.Sprintf("%s_%s.yaml", pp.Name, strings.ToLower(kind)), manifest)
if err != nil {
return err
}
}
}

fileName := fmt.Sprintf("%s_%s.yaml", pp.Name, strings.ToLower(kind))
fullFilePath := filepath.Join(outputDir, fileName)
klog.Info("Writing file: ", fullFilePath)
return nil
}

err = os.WriteFile(fullFilePath, b, 0644)
if err != nil {
return err
}
klog.Info(fileName)
// writeObject serializes manifest to YAML and writes it under outputDir
// with the given fileName, using 0644 file permissions.
func writeObject(outputDir, fileName string, manifest interface{}) error {
	target := filepath.Join(outputDir, fileName)
	klog.Infof("Writing file: %s -> %s", fileName, target)

	data, err := yaml.Marshal(manifest)
	if err != nil {
		return err
	}

	return os.WriteFile(target, data, 0644)
}

// addOwnerReference sets pp as the k8s owner of every rendered object in
// components. The profile must carry the apiserver-assigned UID: fabricating
// one causes update conflicts later, so a nil profile or a missing UID is an
// error (callers should use the label-name mode in that case).
func addOwnerReference(components *manifestset.ManifestResultSet, pp *performancev2.PerformanceProfile) error {
	if pp == nil || pp.UID == types.UID("") {
		// lowercase, no punctuation: Go error string convention (staticcheck ST1005)
		return fmt.Errorf("missing UID from performance profile")
	}

	or := []v1.OwnerReference{
		{
			Kind:       pp.Kind,
			Name:       pp.Name,
			APIVersion: pp.APIVersion,
			UID:        pp.UID,
		},
	}

	for _, componentObj := range components.ToObjects() {
		componentObj.SetOwnerReferences(or)
	}

	return nil
}

// addWeakOwnerReferenceLabel stamps the profile name onto pp itself and onto
// every rendered object in components, under the weakOwnerReferenceNameLabel
// key. This "weak" owner reference groups the objects without needing the
// apiserver-assigned UID; a controller is expected to replace it with a real
// ownerReference once the cluster is running.
func addWeakOwnerReferenceLabel(components *manifestset.ManifestResultSet, pp *performancev2.PerformanceProfile) error {
	// guard against a nil profile, consistent with addOwnerReference;
	// without this check the dereferences below would panic.
	if pp == nil {
		return fmt.Errorf("missing performance profile")
	}

	lab := weakOwnerReferenceNameLabel // shortcut
	val := pp.Name                     // shortcut

	if pp.Labels == nil {
		pp.Labels = make(map[string]string)
	}
	pp.Labels[lab] = val

	for _, componentObj := range components.ToObjects() {
		labels := componentObj.GetLabels()
		if labels == nil {
			labels = make(map[string]string)
		}
		labels[lab] = val
		componentObj.SetLabels(labels)
	}

	return nil
}

// isValidOwnerRefMode reports whether val is one of the supported
// owner-reference modes ("none", "k8s", "label-name").
func isValidOwnerRefMode(val string) bool {
	switch val {
	case ownerRefModeNone, ownerRefModeK8S, ownerRefModeLabelName:
		return true
	default:
		return false
	}
}

// isLegacySNOWorkloadPinningMethod provides a check for situations where the user is creating an SNO cluster with the
// legacy method for CPU Partitioning. In order to make sure the bootstrap and running cluster MCs are synced up we check the MCs
// provided by the user, if any one of them contain the file addition to `/etc/kubernetes/openshift-workload-pinning` and have not set
Expand Down
Expand Up @@ -16,6 +16,7 @@ const (
defaultExpectedDir = "default"
pinnedExpectedDir = "pinned"
bootstrapExpectedDir = "bootstrap"
noRefExpectedDir = "no-ref"
)

var (
Expand Down Expand Up @@ -73,6 +74,39 @@ var _ = Describe("render command e2e test", func() {
)
runAndCompare(cmd, defaultExpectedDir)
})

// With --owner-ref=k8s the render flow requires the apiserver-assigned UID;
// the input profile manifests carry none, so the command must exit non-zero.
It("Must fail to restore legacy and wrong legacy owner reference if uid is missing", func() {
cmdline := []string{
filepath.Join(binPath, "cluster-node-tuning-operator"),
"render",
"--asset-input-dir", strings.Join(assetsInDirs, ","),
"--asset-output-dir", assetsOutDir,
"--owner-ref", "k8s",
}
fmt.Fprintf(GinkgoWriter, "running: %v\n", cmdline)

cmd := exec.Command(cmdline[0], cmdline[1:]...)
// stdout is intentionally discarded: only the failure (and its stderr,
// surfaced through logStderr) matters here.
_, err := cmd.Output()
Expect(err).To(HaveOccurred(), logStderr(err))
})

// With --owner-ref=none no owner reference (strong or weak) must appear in
// the rendered manifests; output is compared against the no-ref fixtures.
It("Must not set any owner reference if disabled explicitely", func() {
	cmdline := []string{
		filepath.Join(binPath, "cluster-node-tuning-operator"),
		"render",
		"--asset-input-dir", strings.Join(assetsInDirs, ","),
		"--asset-output-dir", assetsOutDir,
		"--owner-ref", "none",
	}
	fmt.Fprintf(GinkgoWriter, "running: %v\n", cmdline)

	cmd := exec.Command(cmdline[0], cmdline[1:]...)
	// Start from the current environment: appending to the fresh Cmd's nil
	// Env would run the child with ONLY these two variables (a non-nil Env
	// replaces inheritance entirely, dropping PATH, HOME, ...).
	cmd.Env = append(os.Environ(),
		fmt.Sprintf("ASSET_INPUT_DIR=%s", strings.Join(assetsInDirs, ",")),
		fmt.Sprintf("ASSET_OUTPUT_DIR=%s", assetsOutDir),
	)
	runAndCompare(cmd, noRefExpectedDir)
})
})

Context("With pinned cluster resources", func() {
Expand All @@ -88,7 +122,6 @@ var _ = Describe("render command e2e test", func() {

cmd := exec.Command(cmdline[0], cmdline[1:]...)
runAndCompare(cmd, pinnedExpectedDir)

})

It("Given legacy SNO pinned infrastructure status, should render cpu partitioning configs", func() {
Expand Down Expand Up @@ -164,7 +197,7 @@ func cleanArtifacts() {

func runAndCompare(cmd *exec.Cmd, dir string) {
_, err := cmd.Output()
Expect(err).ToNot(HaveOccurred())
Expect(err).ToNot(HaveOccurred(), logStderr(err))

outputAssetsFiles, err := os.ReadDir(assetsOutDir)
Expect(err).ToNot(HaveOccurred())
Expand All @@ -186,3 +219,10 @@ func runAndCompare(cmd *exec.Cmd, dir string) {
diff)
}
}

func logStderr(err error) string {
if exitErr, ok := err.(*exec.ExitError); ok {
return fmt.Sprintf("error running the command: [[%s]]", exitErr.Stderr)
}
return fmt.Sprintf("error running the command: [[%s]]", err)
}
@@ -0,0 +1,16 @@
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
creationTimestamp: null
name: openshift-bootstrap-master
labels:
performance.openshift.io/weak-owner-reference-name: "openshift-bootstrap-master"
spec:
cpu:
isolated: 0-1
reserved: 2-7
machineConfigPoolSelector:
pools.operator.machineconfiguration.openshift.io/master: ""
nodeSelector:
node-role.kubernetes.io/master: ""
status: {}
Expand Up @@ -3,11 +3,8 @@ kind: KubeletConfig
metadata:
creationTimestamp: null
name: performance-openshift-bootstrap-master
ownerReferences:
- apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
name: openshift-bootstrap-master
uid: ""
labels:
performance.openshift.io/weak-owner-reference-name: "openshift-bootstrap-master"
spec:
kubeletConfig:
apiVersion: kubelet.config.k8s.io/v1beta1
Expand Down
Expand Up @@ -4,12 +4,8 @@ metadata:
creationTimestamp: null
labels:
machineconfiguration.openshift.io/role: master
performance.openshift.io/weak-owner-reference-name: "openshift-bootstrap-master"
name: 50-performance-openshift-bootstrap-master
ownerReferences:
- apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
name: openshift-bootstrap-master
uid: ""
spec:
baseOSExtensionsContainerImage: ""
config:
Expand Down
Expand Up @@ -3,11 +3,8 @@ kind: Node
metadata:
creationTimestamp: null
name: cluster
ownerReferences:
- apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
name: openshift-bootstrap-master
uid: ""
labels:
performance.openshift.io/weak-owner-reference-name: "openshift-bootstrap-master"
spec:
cgroupMode: v1
status: {}
Expand Up @@ -4,11 +4,8 @@ kind: RuntimeClass
metadata:
creationTimestamp: null
name: performance-openshift-bootstrap-master
ownerReferences:
- apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
name: openshift-bootstrap-master
uid: ""
labels:
performance.openshift.io/weak-owner-reference-name: "openshift-bootstrap-master"
scheduling:
nodeSelector:
node-role.kubernetes.io/master: ""
Expand Up @@ -4,11 +4,8 @@ metadata:
creationTimestamp: null
name: openshift-node-performance-openshift-bootstrap-master
namespace: openshift-cluster-node-tuning-operator
ownerReferences:
- apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
name: openshift-bootstrap-master
uid: ""
labels:
performance.openshift.io/weak-owner-reference-name: "openshift-bootstrap-master"
spec:
profile:
- data: "[main]\nsummary=Openshift node optimized for deterministic performance
Expand Down
@@ -0,0 +1,16 @@
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
creationTimestamp: null
name: openshift-bootstrap-worker
labels:
performance.openshift.io/weak-owner-reference-name: "openshift-bootstrap-worker"
spec:
cpu:
isolated: 0-1
reserved: 2-3
machineConfigPoolSelector:
pools.operator.machineconfiguration.openshift.io/worker: ""
nodeSelector:
node-role.kubernetes.io/worker: ""
status: {}

0 comments on commit 5308f47

Please sign in to comment.