Merge branch 'master' into lint-redefined-builtin-id
Affan-7 committed Jan 10, 2024
2 parents c6f5c08 + d10b6d8 commit 014737c
Showing 51 changed files with 230 additions and 120 deletions.
10 changes: 10 additions & 0 deletions .golangci.yml
@@ -74,6 +74,16 @@ linters-settings:
- name: error-return
- name: receiver-naming
- name: redefines-builtin-id
- name: increment-decrement
- name: range
- name: error-naming
- name: dot-imports
- name: errorf
- name: exported
- name: var-declaration
- name: blank-imports
- name: indent-error-flow
- name: unreachable-code
staticcheck:
checks:
- all
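Most of the remaining changes in this commit add doc comments and rename identifiers to satisfy these newly enabled revive rules, in particular `exported`, which requires a doc comment starting with the identifier's name on every exported identifier and also flags names that stutter with the package name (presumably the reason `federatedhpa.FederatedHPAController` becomes `FHPAController` below). A minimal hypothetical sketch, not taken from the Karmada codebase, of what two of the new rules check:

```go
package main

import "fmt"

// Counter counts events. The doc comment starting with the type name
// satisfies revive's `exported` rule; omitting it would be flagged.
type Counter struct{ n int }

// Add increments the counter by one.
func (c *Counter) Add() {
	c.n++ // `increment-decrement` prefers c.n++ over c.n += 1
}

// Value returns the current count.
func (c *Counter) Value() int { return c.n }

func main() {
	c := &Counter{}
	c.Add()
	fmt.Println(c.Value())
}
```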
2 changes: 1 addition & 1 deletion cmd/controller-manager/app/controllermanager.go
@@ -623,7 +623,7 @@ func startFederatedHorizontalPodAutoscalerController(ctx controllerscontext.Cont
ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerTolerance,
ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerCPUInitializationPeriod.Duration,
ctx.Opts.HPAControllerConfiguration.HorizontalPodAutoscalerInitialReadinessDelay.Duration)
federatedHPAController := federatedhpa.FederatedHPAController{
federatedHPAController := federatedhpa.FHPAController{
Client: ctx.Mgr.GetClient(),
EventRecorder: ctx.Mgr.GetEventRecorderFor(federatedhpa.ControllerName),
RESTMapper: ctx.Mgr.GetRESTMapper(),
22 changes: 17 additions & 5 deletions operator/README.md
@@ -82,6 +82,13 @@ metadata:
EOF
```

You can also create a Karmada CR directly using the sample provided by the Karmada operator.

```shell
kubectl create namespace test
kubectl apply -f operator/config/samples/karmada.yaml
```

Wait for around 40 seconds, and the pods of the Karmada components will be running in the same namespace as the Karmada CR.

```shell
@@ -91,10 +98,19 @@ karmada-demo-apiserver-55968d9f8c-mp8hf 1/1 Running 0
karmada-demo-controller-manager-64455f7fd4-stls6 1/1 Running 0 5s
karmada-demo-etcd-0 1/1 Running 0 37s
karmada-demo-kube-controller-manager-584f978bbd-fftwq 1/1 Running 0 5s
karmada-demo-metrics-adapter-57cb5f56b6-4vwk2 1/1 Running 0 5s
karmada-demo-metrics-adapter-57cb5f56b6-zbhjk 1/1 Running 0 5s
karmada-demo-scheduler-6d77b7547-hgz8n 1/1 Running 0 5s
karmada-demo-webhook-6f5944f5d8-bpkqz 1/1 Running 0 5s
```

### Generate kubeconfig for karmada

```shell
kubectl get secret -n test karmada-demo-admin-config -o jsonpath={.data.kubeconfig} | base64 -d > ~/.kube/karmada-apiserver.config
export KUBECONFIG=~/.kube/karmada-apiserver.config
```

> **Tip**:
>
> If no `spec.hostCluster.secretRef` is specified in the CR, the Karmada instance will be installed in the cluster where `karmada-operator` is located.
@@ -233,15 +249,11 @@ metadata:
namespace: test
spec:
components:
KarmadaDescheduler: {}
karmadaDescheduler: {}
```

If you want to install with the defaults, simply define an empty struct for `descheduler`.

> **Tip**:
>
> Now, we only support installing the `descheduler` addon.
## Contributing

The `karmada/operator` repo is part of Karmada from 1.5 onwards. If you're interested in
4 changes: 2 additions & 2 deletions operator/pkg/apis/operator/v1alpha1/defaults.go
@@ -39,7 +39,7 @@ var (
karmadaSchedulerImageRepository = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaScheduler)
karmadaWebhookImageRepository = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaWebhook)
karmadaDeschedulerImageRepository = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaDescheduler)
KarmadaMetricsAdapterImageRepository = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaMetricsAdapter)
karmadaMetricsAdapterImageRepository = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaMetricsAdapter)
karmadaSearchImageRepository = fmt.Sprintf("%s/%s", constants.KarmadaDefaultRepository, constants.KarmadaSearch)
)

@@ -285,7 +285,7 @@ func setDefaultsKarmadaMetricsAdapter(obj *KarmadaComponents) {

metricsAdapter := obj.KarmadaMetricsAdapter
if len(metricsAdapter.Image.ImageRepository) == 0 {
metricsAdapter.Image.ImageRepository = KarmadaMetricsAdapterImageRepository
metricsAdapter.Image.ImageRepository = karmadaMetricsAdapterImageRepository
}
if len(metricsAdapter.Image.ImageTag) == 0 {
metricsAdapter.Image.ImageTag = DefaultKarmadaImageVersion
3 changes: 3 additions & 0 deletions operator/pkg/apis/operator/v1alpha1/helper.go
@@ -28,6 +28,7 @@ func (image *Image) Name() string {
return fmt.Sprintf("%s:%s", image.ImageRepository, image.ImageTag)
}

// KarmadaInProgressing sets the Karmada condition to Progressing.
func KarmadaInProgressing(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
@@ -40,6 +41,7 @@
apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition)
}

// KarmadaCompleted sets the Karmada condition to Completed.
func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
@@ -52,6 +54,7 @@ func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message str
apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition)
}

// KarmadaFailed sets the Karmada condition to Failed.
func KarmadaFailed(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
11 changes: 11 additions & 0 deletions operator/pkg/certs/certs.go
@@ -481,6 +481,17 @@ func apiServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, e
},
}

// When deploying Karmada in a namespace other than 'karmada-system', such as 'test', there are two scenarios:
// 1. When karmada-apiserver accesses the APIService, the cert of 'karmada-demo-aggregated-apiserver' is verified to check
// whether its altNames contain 'karmada-demo-aggregated-apiserver.karmada-system.svc';
// 2. When karmada-apiserver accesses the webhook, the cert of 'karmada-demo-webhook' is verified to check
// whether its altNames contain 'karmada-demo-webhook.test.svc'.
// Therefore, the certificate's altNames should contain both 'karmada-system.svc.cluster.local' and 'test.svc.cluster.local'.
if cfg.Namespace != constants.KarmadaSystemNamespace {
appendSANsToAltNames(altNames, []string{fmt.Sprintf("*.%s.svc.cluster.local", cfg.Namespace),
fmt.Sprintf("*.%s.svc", cfg.Namespace)})
}

if len(cfg.Components.KarmadaAPIServer.CertSANs) > 0 {
appendSANsToAltNames(altNames, cfg.Components.KarmadaAPIServer.CertSANs)
}
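As a small illustration of the effect of the extra SANs (a standalone hypothetical sketch, not part of this change), Go's crypto/x509 hostname matching accepts both service names mentioned in the comment above once the certificate carries wildcard entries for both namespaces:

```go
package main

import (
	"crypto/x509"
	"fmt"
)

func main() {
	// A hypothetical certificate whose SANs include the default
	// 'karmada-system' entries plus the ones appended above for a
	// Karmada instance deployed in the 'test' namespace.
	cert := &x509.Certificate{
		DNSNames: []string{
			"*.karmada-system.svc.cluster.local",
			"*.karmada-system.svc",
			"*.test.svc.cluster.local", // appended when cfg.Namespace == "test"
			"*.test.svc",
		},
	}

	// Both hostnames that karmada-apiserver may dial now match a SAN,
	// so VerifyHostname returns nil for each.
	fmt.Println(cert.VerifyHostname("karmada-demo-aggregated-apiserver.karmada-system.svc")) // <nil>
	fmt.Println(cert.VerifyHostname("karmada-demo-webhook.test.svc"))                        // <nil>
}
```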
1 change: 1 addition & 0 deletions operator/pkg/util/kubeconfig.go
@@ -66,6 +66,7 @@ func IsInCluster(hostCluster *operatorv1alpha1.HostCluster) bool {
return hostCluster == nil || hostCluster.SecretRef == nil || len(hostCluster.SecretRef.Name) == 0
}

// BuildClientFromSecretRef builds a clientset from the secret reference.
func BuildClientFromSecretRef(client *clientset.Clientset, ref *operatorv1alpha1.LocalSecretReference) (*clientset.Clientset, error) {
secret, err := client.CoreV1().Secrets(ref.Namespace).Get(context.TODO(), ref.Name, metav1.GetOptions{})
if err != nil {
2 changes: 2 additions & 0 deletions operator/pkg/util/util.go
@@ -162,11 +162,13 @@ func ListFiles(path string) []os.FileInfo {
return files
}

// FileExtInfo is file info with the file's absolute path
type FileExtInfo struct {
os.FileInfo
AbsPath string
}

// ListFileWithSuffix traverses directory files with the given suffix
func ListFileWithSuffix(path, suffix string) []FileExtInfo {
files := []FileExtInfo{}
if err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
6 changes: 5 additions & 1 deletion pkg/controllers/cronfederatedhpa/cronfederatedhpa_handler.go
@@ -17,7 +17,7 @@ import (
"fmt"
"sync"
"time"
_ "time/tzdata"
_ "time/tzdata" // import tzdata to support time zone parsing, this is needed by function time.LoadLocation

"github.com/go-co-op/gocron"
autoscalingv2 "k8s.io/api/autoscaling/v2"
@@ -30,11 +30,13 @@
"github.com/karmada-io/karmada/pkg/util/helper"
)

// RuleCron is the wrapper of gocron.Scheduler and CronFederatedHPARule
type RuleCron struct {
*gocron.Scheduler
autoscalingv1alpha1.CronFederatedHPARule
}

// CronHandler is the handler for CronFederatedHPA
type CronHandler struct {
client client.Client
eventRecorder record.EventRecorder
@@ -84,6 +86,7 @@ func (c *CronHandler) AddCronExecutorIfNotExist(cronFHPAKey string) {
c.cronExecutorMap[cronFHPAKey] = make(map[string]RuleCron)
}

// RuleCronExecutorExists checks if the executor for a specific CronFederatedHPA rule exists
func (c *CronHandler) RuleCronExecutorExists(cronFHPAKey string,
ruleName string) (autoscalingv1alpha1.CronFederatedHPARule, bool) {
c.executorLock.RLock()
@@ -159,6 +162,7 @@ func (c *CronHandler) CreateCronJobForExecutor(cronFHPA *autoscalingv1alpha1.Cro
return nil
}

// GetRuleNextExecuteTime returns the next execution time of a CronFederatedHPA rule
func (c *CronHandler) GetRuleNextExecuteTime(cronFHPA *autoscalingv1alpha1.CronFederatedHPA, ruleName string) (time.Time, error) {
c.executorLock.RLock()
defer c.executorLock.RUnlock()
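A short aside on the blank `time/tzdata` import added at the top of this file: it embeds the IANA time zone database into the binary, so `time.LoadLocation` keeps working in container images that ship no `/usr/share/zoneinfo`. A minimal standalone sketch, not from this repository:

```go
package main

import (
	"fmt"
	"time"
	_ "time/tzdata" // embed the IANA time zone database into the binary
)

func main() {
	// Without a system zoneinfo directory or the embedded database,
	// this lookup would fail with "unknown time zone Asia/Shanghai".
	loc, err := time.LoadLocation("Asia/Shanghai")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Now().In(loc).Format(time.RFC3339))
}
```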
21 changes: 13 additions & 8 deletions pkg/controllers/cronfederatedhpa/cronfederatedhpa_job.go
@@ -39,7 +39,8 @@ import (
"github.com/karmada-io/karmada/pkg/util/helper"
)

type CronFederatedHPAJob struct {
// ScalingJob is a job to handle CronFederatedHPA.
type ScalingJob struct {
client client.Client
eventRecorder record.EventRecorder
scheduler *gocron.Scheduler
@@ -48,9 +49,10 @@ type CronFederatedHPAJob struct {
rule autoscalingv1alpha1.CronFederatedHPARule
}

// NewCronFederatedHPAJob creates a new ScalingJob.
func NewCronFederatedHPAJob(client client.Client, eventRecorder record.EventRecorder, scheduler *gocron.Scheduler,
cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) *CronFederatedHPAJob {
return &CronFederatedHPAJob{
cronFHPA *autoscalingv1alpha1.CronFederatedHPA, rule autoscalingv1alpha1.CronFederatedHPARule) *ScalingJob {
return &ScalingJob{
client: client,
eventRecorder: eventRecorder,
scheduler: scheduler,
@@ -62,7 +64,8 @@ }
}
}

func RunCronFederatedHPARule(c *CronFederatedHPAJob) {
// RunCronFederatedHPARule runs the job to handle CronFederatedHPA.
func RunCronFederatedHPARule(c *ScalingJob) {
klog.V(4).Infof("Start to handle CronFederatedHPA %s", c.namespaceName)
defer klog.V(4).Infof("End to handle CronFederatedHPA %s", c.namespaceName)

@@ -122,7 +125,8 @@ func RunCronFederatedHPARule(c *CronFederatedHPAJob) {
})
}

func (c *CronFederatedHPAJob) ScaleFHPA(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error {
// ScaleFHPA scales FederatedHPA's minReplicas and maxReplicas
func (c *ScalingJob) ScaleFHPA(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error {
fhpaName := types.NamespacedName{
Namespace: cronFHPA.Namespace,
Name: cronFHPA.Spec.ScaleTargetRef.Name,
@@ -161,7 +165,8 @@ func (c *CronFederatedHPAJob) ScaleFHPA(cronFHPA *autoscalingv1alpha1.CronFedera
return nil
}

func (c *CronFederatedHPAJob) ScaleWorkloads(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error {
// ScaleWorkloads scales the workload's replicas directly
func (c *ScalingJob) ScaleWorkloads(cronFHPA *autoscalingv1alpha1.CronFederatedHPA) error {
ctx := context.Background()

scaleClient := c.client.SubResource("scale")
@@ -217,7 +222,7 @@ func (c *CronFederatedHPAJob) ScaleWorkloads(cronFHPA *autoscalingv1alpha1.CronF
return nil
}

func (c *CronFederatedHPAJob) addFailedExecutionHistory(
func (c *ScalingJob) addFailedExecutionHistory(
cronFHPA *autoscalingv1alpha1.CronFederatedHPA, errMsg string) error {
_, nextExecutionTime := c.scheduler.NextRun()

@@ -269,7 +274,7 @@ func (c *CronFederatedHPAJob) addFailedExecutionHistory(
})
}

func (c *CronFederatedHPAJob) addSuccessExecutionHistory(
func (c *ScalingJob) addSuccessExecutionHistory(
cronFHPA *autoscalingv1alpha1.CronFederatedHPA,
appliedReplicas, appliedMinReplicas, appliedMaxReplicas *int32) error {
_, nextExecutionTime := c.scheduler.NextRun()
