Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Provisioning fixes #32702

Merged
merged 12 commits on May 14, 2021
2 changes: 1 addition & 1 deletion Dockerfile.dapper
Expand Up @@ -89,7 +89,7 @@ RUN wget -O - ${!DOCKER_URL} > /usr/bin/docker && chmod +x /usr/bin/docker
ENV YQ_URL=https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_${ARCH}
RUN wget -O - ${YQ_URL} > /usr/bin/yq && chmod +x /usr/bin/yq

# kubectl release matching the Kubernetes minor (v1.21) used by this branch.
ENV KUBECTL_URL=https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/${ARCH}/kubectl
RUN wget -O - ${KUBECTL_URL} > /usr/bin/kubectl && chmod +x /usr/bin/kubectl

RUN apt-get update && \
Expand Down
5 changes: 3 additions & 2 deletions go.mod
Expand Up @@ -41,6 +41,7 @@ replace (
k8s.io/metrics => k8s.io/metrics v0.21.0
k8s.io/mount-utils => k8s.io/mount-utils v0.21.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.0
sigs.k8s.io/cluster-api => github.com/rancher/cluster-api v0.3.11-0.20210514043303-8726f6e84d41
)

require (
Expand Down Expand Up @@ -100,8 +101,8 @@ require (
github.com/rancher/fleet/pkg/apis v0.0.0-20210428191153-f414eab0e4de
github.com/rancher/gke-operator v1.0.1
github.com/rancher/kubernetes-provider-detector v0.1.2
github.com/rancher/lasso v0.0.0-20210408231703-9ddd9378d08d
github.com/rancher/lasso/controller-runtime v0.0.0-20210219163000-fcdfcec12969
github.com/rancher/lasso v0.0.0-20210512210011-31c3967449d3
github.com/rancher/lasso/controller-runtime v0.0.0-20210512210011-31c3967449d3
github.com/rancher/machine v0.15.0-rancher57
github.com/rancher/norman v0.0.0-20210504005327-7b74a9f308a7
github.com/rancher/rancher/pkg/apis v0.0.0
Expand Down
11 changes: 6 additions & 5 deletions go.sum
Expand Up @@ -952,6 +952,8 @@ github.com/rancher/channelserver v0.5.1-0.20210421200213-5495c5f6e430 h1:Wes0yXZ
github.com/rancher/channelserver v0.5.1-0.20210421200213-5495c5f6e430/go.mod h1:1/9aBa8mXEIinRiT6Qlv/zbt6Nv9uorv+kFVNYvpmiI=
github.com/rancher/client-go v0.21.0-rancher.1 h1:WVy7jpVmdjIRVkgXpBRlNx4jnGkLCFS1Qu/hUFCOGN4=
github.com/rancher/client-go v0.21.0-rancher.1/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
github.com/rancher/cluster-api v0.3.11-0.20210514043303-8726f6e84d41 h1:4YsdMzVRGze/N9cAgeqbGNTjuY08wtVB92+OinhOtw8=
github.com/rancher/cluster-api v0.3.11-0.20210514043303-8726f6e84d41/go.mod h1:iDmIO2c5/RSiSciQnI7Xocfe8hTu+KafcgEuc/hnhS4=
github.com/rancher/dynamiclistener v0.2.1-0.20200714201033-9c1939da3af9/go.mod h1:qr0QfhwzcVCR+Ao9WyfnE+jmOpfEAdRhXtNOZGJ3nCQ=
github.com/rancher/dynamiclistener v0.2.1-0.20201110045217-9b1b7d3132e8 h1:B4dt8sHPyt+hzYzFoWtKWTppls0KzSu2jEIV2jyY4sY=
github.com/rancher/dynamiclistener v0.2.1-0.20201110045217-9b1b7d3132e8/go.mod h1:qr0QfhwzcVCR+Ao9WyfnE+jmOpfEAdRhXtNOZGJ3nCQ=
Expand All @@ -970,10 +972,11 @@ github.com/rancher/lasso v0.0.0-20200515155337-a34e1e26ad91/go.mod h1:G6Vv2aj6xB
github.com/rancher/lasso v0.0.0-20200820172840-0e4cc0ef5cb0/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18=
github.com/rancher/lasso v0.0.0-20200905045615-7fcb07d6a20b/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18=
github.com/rancher/lasso v0.0.0-20210218205127-cbbdbcc1c003/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18=
github.com/rancher/lasso v0.0.0-20210408231703-9ddd9378d08d h1:vfjPEF6M7Jf1/zK1xF7z2drLfniooKcgDQdoXO5+U7w=
github.com/rancher/lasso v0.0.0-20210408231703-9ddd9378d08d/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18=
github.com/rancher/lasso/controller-runtime v0.0.0-20210219163000-fcdfcec12969 h1:hIAPazatZTEnV5rHCG961B0PFwq43mGK4r5JSQdqkug=
github.com/rancher/lasso/controller-runtime v0.0.0-20210219163000-fcdfcec12969/go.mod h1:+0JeyzD5Cb1eavAbVF1+7SfQUItne0C+UxsTk+PU1Ms=
github.com/rancher/lasso v0.0.0-20210512210011-31c3967449d3 h1:XXC5QpCq78JXDfuhBxa2c0dAAUiAcO1lzKdV6EccsjQ=
github.com/rancher/lasso v0.0.0-20210512210011-31c3967449d3/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18=
github.com/rancher/lasso/controller-runtime v0.0.0-20210512210011-31c3967449d3 h1:72p3wzjiunmDZwnzMbqNcuF5OWT9VAPaNT87p5BXTN4=
github.com/rancher/lasso/controller-runtime v0.0.0-20210512210011-31c3967449d3/go.mod h1:+0JeyzD5Cb1eavAbVF1+7SfQUItne0C+UxsTk+PU1Ms=
github.com/rancher/machine v0.15.0-rancher57 h1:OGzaz1cuzC6oVZfywB8U22iOiSH3yYvIsh2PCIfcSeY=
github.com/rancher/machine v0.15.0-rancher57/go.mod h1:VXpVeBwwGyMOuuUWaNKBxm54Qz9g9Vp6LDkaOtDX0lo=
github.com/rancher/moq v0.0.0-20200712062324-13d1f37d2d77 h1:k+vzmkZQsH06rZnDr+phskSixG9ByNj9gVdzHcc8nxw=
Expand Down Expand Up @@ -1774,8 +1777,6 @@ sigs.k8s.io/aws-iam-authenticator v0.5.1 h1:0Nv09uOayy99IOYgNamMl0cwTuQWRtEuUu6s
sigs.k8s.io/aws-iam-authenticator v0.5.1/go.mod h1:yPDLi58MDx1UtCrRMOykLm1IyKKPGHgcGCafcbn2s3E=
sigs.k8s.io/cli-utils v0.16.0 h1:Wr32m1oxjIqc9G9l+igr13PeIM9LCyq8jQ8KjXKelvg=
sigs.k8s.io/cli-utils v0.16.0/go.mod h1:9Jqm9K2W6ShhCxsEuaz6HSRKKOXigPUx3ZfypGgxBLY=
sigs.k8s.io/cluster-api v0.3.11-0.20210430180359-45b6080c2764 h1:KR5002Y1x1g+BYmsoaODC/Z43xdKSY0B6bOcwfBa9Bk=
sigs.k8s.io/cluster-api v0.3.11-0.20210430180359-45b6080c2764/go.mod h1:iDmIO2c5/RSiSciQnI7Xocfe8hTu+KafcgEuc/hnhS4=
sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns=
sigs.k8s.io/controller-runtime v0.8.2/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU=
sigs.k8s.io/controller-runtime v0.9.0-beta.0 h1:GMy39zuf8ywrQyNuc24wR1mdiJCSGJnxaaqakZldaug=
Expand Down
5 changes: 4 additions & 1 deletion pkg/controllers/provisioningv2/cluster/controller.go
Expand Up @@ -2,6 +2,8 @@ package cluster

import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"

"github.com/rancher/norman/types/convert"
Expand Down Expand Up @@ -199,9 +201,10 @@ func (h *handler) createCluster(cluster *v1.Cluster, status v1.ClusterStatus, sp
}
}

hash := sha256.Sum256([]byte(cluster.Namespace + "/" + cluster.Name))
newCluster := &v3.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name.SafeConcatName("c", "m", string(cluster.UID[:8])),
Name: name.SafeConcatName("c", "m", hex.EncodeToString(hash[:])[:8]),
Labels: cluster.Labels,
Annotations: map[string]string{},
},
Expand Down
14 changes: 12 additions & 2 deletions pkg/controllers/provisioningv2/rke2/bootstrap/controller.go
Expand Up @@ -13,6 +13,7 @@ import (
"github.com/rancher/rancher/pkg/provisioningv2/rke2/planner"
"github.com/rancher/rancher/pkg/wrangler"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/name"
"github.com/rancher/wrangler/pkg/relatedresource"
corev1 "k8s.io/api/core/v1"
Expand Down Expand Up @@ -79,8 +80,16 @@ func Register(ctx context.Context, clients *wrangler.Context) {
}, nil
}
}
if machine, ok := obj.(*capi.Machine); ok {
if machine.Spec.Bootstrap.ConfigRef != nil && machine.Spec.Bootstrap.ConfigRef.Kind == "RKEBootstrap" {
return []relatedresource.Key{{
Namespace: machine.Namespace,
Name: machine.Spec.Bootstrap.ConfigRef.Name,
}}, nil
}
}
return nil, nil
}, clients.RKE.RKEBootstrap(), clients.Core.ServiceAccount())
}, clients.RKE.RKEBootstrap(), clients.Core.ServiceAccount(), clients.CAPI.Machine())
}

func (h *handler) getBootstrapSecret(namespace, name string, envVars []corev1.EnvVar) (*corev1.Secret, error) {
Expand Down Expand Up @@ -191,7 +200,7 @@ func (h *handler) getMachine(obj *rkev1.RKEBootstrap) (*capi.Machine, error) {

return h.machineCache.Get(obj.Namespace, ref.Name)
}
return nil, fmt.Errorf("no machine associated to RKEBootstrap %s/%s", obj.Namespace, obj.Name)
return nil, generic.ErrSkip
}

func (h *handler) getEnvVar(machine *capi.Machine) ([]corev1.EnvVar, error) {
Expand Down Expand Up @@ -235,6 +244,7 @@ func (h *handler) assignBootStrapSecret(machine *capi.Machine, obj *rkev1.RKEBoo
Namespace: obj.Namespace,
Labels: map[string]string{
planner.MachineNameLabel: machine.Name,
rkeBootstrapName: obj.Name,
roleLabel: roleBootstrap,
},
},
Expand Down
Expand Up @@ -15,6 +15,7 @@ import (
"github.com/rancher/rancher/pkg/provisioningv2/rke2/planner"
"github.com/rancher/rancher/pkg/wrangler"
"github.com/rancher/wrangler/pkg/data"
"github.com/rancher/wrangler/pkg/generic"
corev1 "k8s.io/api/core/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -59,7 +60,7 @@ func Register(ctx context.Context, clients *wrangler.Context) {
dynamic: clients.Dynamic,
}

clients.RKE.RKEBootstrap().OnChange(ctx, "machine-provider-sync", h.associateMachineWithNode)
clients.RKE.RKEBootstrap().OnChange(ctx, "machine-node-lookup", h.associateMachineWithNode)
}

func (h *handler) getMachine(obj *rkev1.RKEBootstrap) (*capi.Machine, error) {
Expand All @@ -72,7 +73,7 @@ func (h *handler) getMachine(obj *rkev1.RKEBootstrap) (*capi.Machine, error) {

return h.machineCache.Get(obj.Namespace, ref.Name)
}
return nil, fmt.Errorf("no machine associated to RKEBootstrap %s/%s", obj.Namespace, obj.Name)
return nil, generic.ErrSkip
}

func (h *handler) associateMachineWithNode(_ string, bootstrap *rkev1.RKEBootstrap) (*rkev1.RKEBootstrap, error) {
Expand Down
16 changes: 13 additions & 3 deletions pkg/controllers/provisioningv2/rke2/machineprovision/args.go
Expand Up @@ -9,6 +9,7 @@ import (
rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1"
"github.com/rancher/rancher/pkg/settings"
"github.com/rancher/wrangler/pkg/data"
"github.com/rancher/wrangler/pkg/generic"
name2 "github.com/rancher/wrangler/pkg/name"
corev1 "k8s.io/api/core/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
Expand All @@ -19,7 +20,12 @@ import (
)

var (
	// regExHyphen matches a lowercase-to-uppercase letter boundary so
	// camelCase secret keys can be rewritten with an underscore between the
	// words (e.g. "accessKey" -> "access_Key", later upper-cased to form an
	// environment-variable name such as ACCESS_KEY).
	regExHyphen = regexp.MustCompile("([a-z])([A-Z])")

	// envNameOverride maps node-driver names to the environment-variable
	// prefix their underlying machine driver expects when that prefix
	// differs from the driver name itself (e.g. amazonec2 uses AWS_*).
	envNameOverride = map[string]string{
		"amazonec2": "AWS",
		"rackspace": "OS",
		"openstack": "OS",
	}
)

type driverArgs struct {
Expand Down Expand Up @@ -77,7 +83,11 @@ func (h *handler) getArgsEnvAndStatus(typeMeta meta.Type, meta metav1.Object, da
}

for k, v := range secrets {
k := strings.ToUpper(driver + "_" + regExHyphen.ReplaceAllString(k, "${1}_${2}"))
envName := envNameOverride[driver]
if envName == "" {
envName = driver
}
k := strings.ToUpper(envName + "_" + regExHyphen.ReplaceAllString(k, "${1}_${2}"))
secret.Data[k] = []byte(v)
}

Expand Down Expand Up @@ -162,7 +172,7 @@ func (h *handler) getSecretData(meta metav1.Object, obj data.Object) (string, st
}

if machine == nil {
return "", "", nil, fmt.Errorf("failed to find capi machine for %s/%s", meta.GetNamespace(), meta.GetName())
return "", "", nil, generic.ErrSkip
}

if cloudCredentialSecretName == "" {
Expand Down
Expand Up @@ -2,18 +2,38 @@ package rkecontrolplane

import (
"context"
"time"

rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1"
mgmtcontrollers "github.com/rancher/rancher/pkg/generated/controllers/management.cattle.io/v3"
rkecontrollers "github.com/rancher/rancher/pkg/generated/controllers/rke.cattle.io/v1"
"github.com/rancher/rancher/pkg/wrangler"
"github.com/rancher/wrangler/pkg/condition"
)

func Register(ctx context.Context, clients *wrangler.Context) {
h := &handler{
clusterCache: clients.Mgmt.Cluster().Cache(),
rkeControlPlaneController: clients.RKE.RKEControlPlane(),
}

rkecontrollers.RegisterRKEControlPlaneStatusHandler(ctx, clients.RKE.RKEControlPlane(),
"", "rke-control-plane",
func(obj *rkev1.RKEControlPlane, status rkev1.RKEControlPlaneStatus) (rkev1.RKEControlPlaneStatus, error) {
status.Ready = true
status.ObservedGeneration = obj.Generation
return status, nil
})
"", "rke-control-plane", h.OnChange)
}

// handler computes RKEControlPlane status from the state of the backing
// management (v3) cluster.
type handler struct {
	// clusterCache is a read-only cache of management clusters, used to look
	// up the cluster an RKEControlPlane belongs to.
	clusterCache mgmtcontrollers.ClusterCache
	// rkeControlPlaneController is used to re-enqueue an object for a later
	// retry when its management cluster is not yet available.
	rkeControlPlaneController rkecontrollers.RKEControlPlaneController
}

// OnChange recomputes the status for an RKEControlPlane. Ready mirrors the
// "Ready" condition of the associated management cluster; ObservedGeneration
// always tracks the object's current generation.
func (h *handler) OnChange(obj *rkev1.RKEControlPlane, status rkev1.RKEControlPlaneStatus) (rkev1.RKEControlPlaneStatus, error) {
	status.ObservedGeneration = obj.Generation

	mgmtCluster, err := h.clusterCache.Get(obj.Spec.ManagementClusterName)
	if err == nil {
		status.Ready = condition.Cond("Ready").IsTrue(mgmtCluster)
		return status, nil
	}

	// The management cluster is not visible (yet); rather than surface the
	// lookup error, schedule a retry and report the current status as-is.
	h.rkeControlPlaneController.EnqueueAfter(obj.Namespace, obj.Name, 2*time.Second)
	return status, nil
}
Expand Up @@ -275,7 +275,7 @@ func (h *handler) getCAPICluster(secret *corev1.Secret) (*capi.Cluster, error) {
}

func (h *handler) onUnmanagedMachineChange(key string, machine *rkev1.CustomMachine) (*rkev1.CustomMachine, error) {
if machine != nil && !machine.Status.Ready {
if machine != nil && !machine.Status.Ready && machine.Spec.ProviderID != "" {
machine = machine.DeepCopy()
machine.Status.Ready = true
return h.unmanagedMachine.UpdateStatus(machine)
Expand Down
2 changes: 1 addition & 1 deletion pkg/features/feature.go
Expand Up @@ -64,7 +64,7 @@ var (
"Enable legacy features",
true,
false,
false)
true)
ProvisioningV2 = newFeature(
"provisioningv2",
"Enable cluster-api based provisioning framework",
Expand Down
21 changes: 19 additions & 2 deletions pkg/provisioningv2/rke2/planner/planner.go
Expand Up @@ -24,6 +24,7 @@ import (
"github.com/rancher/rancher/pkg/wrangler"
"github.com/rancher/wrangler/pkg/data/convert"
corecontrollers "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/name"
"github.com/rancher/wrangler/pkg/randomtoken"
"github.com/rancher/wrangler/pkg/summary"
Expand Down Expand Up @@ -144,7 +145,7 @@ func PlanSecretFromBootstrapName(bootstrapName string) string {
func (p *Planner) getCAPICluster(controlPlane *rkev1.RKEControlPlane) (*capi.Cluster, error) {
ref := metav1.GetControllerOf(controlPlane)
if ref == nil {
return nil, fmt.Errorf("RKEControlPlane %s/%s has no owner", controlPlane.Namespace, controlPlane.Name)
return nil, generic.ErrSkip
}
gvk := schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind)
if gvk.Kind != "Cluster" || gvk.Group != "cluster.x-k8s.io" {
Expand Down Expand Up @@ -232,6 +233,10 @@ func (p *Planner) Process(controlPlane *rkev1.RKEControlPlane) error {
}

joinServer = p.getControlPlaneJoinURL(plan)
if joinServer == "" {
return ErrWaiting("waiting for control plane to be available")
}

err = p.reconcile(controlPlane, secret, plan, "worker", false, isOnlyWorker, isInitNode,
controlPlane.Spec.UpgradeStrategy.WorkerConcurrency, joinServer,
controlPlane.Spec.UpgradeStrategy.WorkerDrainOptions)
Expand Down Expand Up @@ -501,6 +506,12 @@ func (p *Planner) addETCDSnapshotCredential(config map[string]interface{}, contr
return nil
}

// addDefaults seeds runtime-specific default configuration values before the
// user-supplied config is merged on top. Currently the only default is
// pinning the CNI to calico for RKE2-based clusters; other runtimes get no
// defaults. The machine parameter is accepted for signature parity with the
// other config mutators but is not consulted here.
func addDefaults(config map[string]interface{}, controlPlane *rkev1.RKEControlPlane, machine *capi.Machine) {
	if GetRuntime(controlPlane.Spec.KubernetesVersion) != RuntimeRKE2 {
		return
	}
	config["cni"] = "calico"
}

func addUserConfig(config map[string]interface{}, controlPlane *rkev1.RKEControlPlane, machine *capi.Machine) error {
for _, opts := range controlPlane.Spec.NodeConfig {
sel, err := metav1.LabelSelectorAsSelector(opts.MachineLabelSelector)
Expand Down Expand Up @@ -655,7 +666,11 @@ func (p *Planner) addInstruction(nodePlan plan.NodePlan, controlPlane *rkev1.RKE
}

if isOnlyWorker(machine) {
instruction.Env = append(instruction.Env, fmt.Sprintf("INSTALL_%s_EXEC=agent", GetRuntimeEnv(controlPlane.Spec.KubernetesVersion)))
if GetRuntime(controlPlane.Spec.KubernetesVersion) == RuntimeRKE2 {
instruction.Env = append(instruction.Env, fmt.Sprintf("INSTALL_%s_TYPE=agent", GetRuntimeEnv(controlPlane.Spec.KubernetesVersion)))
} else {
instruction.Env = append(instruction.Env, fmt.Sprintf("INSTALL_%s_EXEC=agent", GetRuntimeEnv(controlPlane.Spec.KubernetesVersion)))
}
}
nodePlan.Instructions = append(nodePlan.Instructions, instruction)
return nodePlan, nil
Expand Down Expand Up @@ -745,6 +760,8 @@ func (p *Planner) addConfigFile(nodePlan plan.NodePlan, controlPlane *rkev1.RKEC
initNode bool, joinServer string) (plan.NodePlan, error) {
config := map[string]interface{}{}

addDefaults(config, controlPlane, machine)

// Must call addUserConfig first because it will filter out non-kdm data
if err := addUserConfig(config, controlPlane, machine); err != nil {
return nodePlan, err
Expand Down
28 changes: 28 additions & 0 deletions scripts/provisioning-tests
@@ -0,0 +1,28 @@
#!/bin/bash
# Start a local rancher server, wait for it to become healthy, then run the
# provisioning integration tests against it.
set -e

# Quoted so the script still works from a checkout path containing spaces.
cd "$(dirname "$0")/.."

echo Starting rancher server
# Pre-seed the k3s image cache with the images the tests will need so nodes
# don't have to pull them mid-test.
mkdir -p /var/lib/rancher/k3s/agent/images
grep PodTestImage ./tests/integration/pkg/defaults/defaults.go | cut -f2 -d'"' > /var/lib/rancher/k3s/agent/images/pull.txt
grep MachineProvisionImage ./pkg/settings/setting.go | cut -f4 -d'"' >> /var/lib/rancher/k3s/agent/images/pull.txt
touch /tmp/rancher.log
./scripts/run >/tmp/rancher.log 2>&1 &
PID=$!

# uncomment to get startup logs. Don't leave them on because it slows drone down too
# much
#tail -F /tmp/rancher.log &
#TPID=$!

# Wait for the rancher API to answer, then for the webhook deployment to be
# fully rolled out, before starting the tests.
while ! curl -sf http://localhost:8080/ping; do
    sleep 2
done
while ! kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml rollout status -w -n cattle-system deploy/rancher-webhook; do
    sleep 2
done
#kill $TPID

echo Running tests
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
go test -v -timeout 20m ./tests/integration/pkg/tests/...
2 changes: 1 addition & 1 deletion scripts/run
Expand Up @@ -6,7 +6,7 @@ cd $(dirname $0)/..

CMD=bin/rancher
# Build the server binary only when it is not already present and executable.
# NOTE(review): the rendered diff left the removed "./scripts/build" line next
# to its replacement; only the build-server form is kept here. $CMD is quoted
# to survive paths containing spaces.
if [ ! -x "$CMD" ]; then
    ./scripts/build-server
fi

rm -rf build/testdata
Expand Down
8 changes: 0 additions & 8 deletions scripts/test
Expand Up @@ -37,16 +37,8 @@ done

echo Running tests

#KUBECONFIG=/etc/rancher/k3s/k3s.yaml go test -parallel 4 ./tests/integration/pkg/tests/... >/tmp/rancher-test.log 2>&1 &
#TESTPID=$!

cd ./tests/integration
tox -e rancher -- -m "not nonparallel" -n $(nproc)
tox -e rancher -- -m nonparallel

#tail -f /tmp/rancher-test.log &
#echo Waiting on go tests
#wait -n $TESTPID || {
#cat /tmp/rancher-test.log
#exit 1
#}
5 changes: 5 additions & 0 deletions tests/integration/pkg/clients/clients.go
Expand Up @@ -2,9 +2,11 @@ package clients

import (
"context"
"time"

"github.com/rancher/rancher/pkg/wrangler"
"github.com/rancher/wrangler/pkg/kubeconfig"
"github.com/rancher/wrangler/pkg/ratelimit"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/clientcmd"
Expand Down Expand Up @@ -58,6 +60,9 @@ func NewForConfig(ctx context.Context, config clientcmd.ClientConfig) (*Clients,
return nil, err
}

rest.Timeout = 30 * time.Minute
rest.RateLimiter = ratelimit.None

context, err := wrangler.NewContext(ctx, config, rest)
if err != nil {
return nil, err
Expand Down