Open
Labels: area/portfwd, documentation, regression
Description
Using the following YAML, which disables port forwarding, I see that ports are still being forwarded. The same YAML works with Lima 1.2.1.
The expected behavior is to see only the SSH port forwarding.
Version: 2.0.1 (from brew).
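Besides drenv.log (shown below), the forwards can be checked straight from Lima's hostagent log; a quick sketch, assuming the default LIMA_HOME (~/.lima) and the instance name `cluster` from the log prefix:

```bash
# Look for guest ports forwarded by the hostagent; with the portForwards
# ignore rule in the YAML below, nothing except the ssh forward should
# show up. Path assumes the default LIMA_HOME (~/.lima).
grep 'Forwarding TCP' ~/.lima/cluster/ha.stderr.log
```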
% grep Forward drenv.log
2025-11-24 19:10:51,817 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:42585 to 127.0.0.1:42585
2025-11-24 19:11:29,525 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:45901 to 127.0.0.1:45901
2025-11-24 19:11:29,583 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:45267 to 127.0.0.1:45267
2025-11-24 19:11:37,999 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10248 to 127.0.0.1:10248
2025-11-24 19:11:40,134 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:2379 to 127.0.0.1:2379
2025-11-24 19:11:40,168 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:2381 to 127.0.0.1:2381
2025-11-24 19:11:40,218 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10257 to 127.0.0.1:10257
2025-11-24 19:11:40,592 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10259 to 127.0.0.1:10259
2025-11-24 19:11:43,693 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10248 to 127.0.0.1:10248

images:
- location: https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-arm64.img
  arch: aarch64
- location: https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img
  arch: x86_64
mounts: []
containerd:
  system: true
  user: false
portForwards:
- ignore: true
  proto: any
  guestIP: 0.0.0.0
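# An ignore rule with proto: any and guestIP: 0.0.0.0 should match every
# guest port, so only the built-in SSH forward should remain. This is
# what happens with Lima 1.2.1.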
param:
  LOCAL_REGISTRY: host.lima.internal:5050
provision:
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    command -v kubeadm >/dev/null 2>&1 && exit 0
    # Install and configure prerequisites
    cat <<EOF | tee /etc/modules-load.d/containerd.conf
    overlay
    br_netfilter
    EOF
    modprobe overlay
    modprobe br_netfilter
    cat <<EOF | tee /etc/sysctl.d/99-kubernetes-cri.conf
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    # Avoid "failed to creating a fsnotify watcher: too many open files"
    # errors with bigger setups.
    cat <<EOF | tee /etc/sysctl.d/99-fs-inotify.conf
    fs.inotify.max_user_instances = 8192
    fs.inotify.max_user_watches = 65536
    EOF
    sysctl --system
    # Installing kubeadm, kubelet and kubectl
    export DEBIAN_FRONTEND=noninteractive
    apt-get update
    apt-get install -y apt-transport-https ca-certificates curl
    VERSION=1.34
    echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
    curl -fsSL https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
    apt-get update
    # cri-tools
    apt-get install -y cri-tools
    cat <<EOF | tee /etc/crictl.yaml
    runtime-endpoint: unix:///run/containerd/containerd.sock
    EOF
    # cni-plugins
    apt-get install -y kubernetes-cni
    rm -f /etc/cni/net.d/*.conf*
    apt-get install -y kubelet kubeadm kubectl && apt-mark hold kubelet kubeadm kubectl
    systemctl enable --now kubelet
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -e /etc/containerd/conf.d/k8s.toml && exit 0
    mkdir -p /etc/containerd/conf.d
    # Configuring the systemd cgroup driver
    # Overriding the sandbox (pause) image
    cat <<EOF >/etc/containerd/conf.d/k8s.toml
    version = 2
    [plugins]
      [plugins."io.containerd.grpc.v1.cri"]
        sandbox_image = "$(kubeadm config images list | grep pause | sort -r | head -n1)"
        # Ramen: Allow unprivileged pods to access block devices.
        device_ownership_from_security_context = true
        [plugins."io.containerd.grpc.v1.cri".containerd]
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
            [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
              runtime_type = "io.containerd.runc.v2"
              [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
                SystemdCgroup = true
        [plugins."io.containerd.grpc.v1.cri".registry]
          config_path = "/etc/containerd/certs.d"
      [plugins."io.containerd.cri.v1.runtime".cni]
        bin_dirs = ["/usr/local/libexec/cni","/opt/cni/bin"]
    EOF
    systemctl restart containerd
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -z "{{.Param.LOCAL_REGISTRY}}" && exit 0
    test -f "/etc/containerd/certs.d/{{.Param.LOCAL_REGISTRY}}/hosts.toml" && exit 0
    mkdir -p "/etc/containerd/certs.d/{{.Param.LOCAL_REGISTRY}}"
    cat << EOF > "/etc/containerd/certs.d/{{.Param.LOCAL_REGISTRY}}/hosts.toml"
    server = "http://{{.Param.LOCAL_REGISTRY}}"
    [host."http://{{.Param.LOCAL_REGISTRY}}"]
      skip_verify = true
    EOF
    systemctl restart containerd
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -e /etc/kubernetes/admin.conf && exit 0
    export KUBECONFIG=/etc/kubernetes/admin.conf
    # Ramen: serve the additional shared network instead of the user network.
    export ADVERTISE_ADDRESS=$(ip -j -4 addr show dev lima0 | jq -r '.[0].addr_info[0].local')
    # Ramen: Use local registry for k8s images
    if [ -n "{{.Param.LOCAL_REGISTRY}}" ]; then
      IMAGE_REPOSITORY="{{.Param.LOCAL_REGISTRY}}/k8s"
    else
      IMAGE_REPOSITORY=""
    fi
    # Initializing your control-plane node
    cat <<EOF >kubeadm-config.yaml
    kind: InitConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    nodeRegistration:
      criSocket: unix:///run/containerd/containerd.sock
      kubeletExtraArgs:
        # Ramen: use specific network
        node-ip: "$ADVERTISE_ADDRESS"
        # Ramen: speed up image pulls
        serialize-image-pulls: "false"
    # Ramen: serve specific network.
    localAPIEndpoint:
      advertiseAddress: "$ADVERTISE_ADDRESS"
    ---
    kind: ClusterConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    imageRepository: "$IMAGE_REPOSITORY"
    apiServer:
      certSANs: # --apiserver-cert-extra-sans
      - "127.0.0.1"
    networking:
      podSubnet: "10.244.0.0/16" # --pod-network-cidr
    ---
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    cgroupDriver: systemd
    featureGates:
      StatefulSetAutoDeletePVC: true
    EOF
    # We ignore the NumCPU preflight error for running a minimal cluster in
    # GitHub actions and for testing drenv.
    # [ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
    kubeadm init --config kubeadm-config.yaml --ignore-preflight-errors NumCPU
    # Scale down coredns like minikube
    kubectl scale deploy coredns -n kube-system --replicas=1
    # Installing a Pod network add-on
    kubectl apply -f https://github.com/flannel-io/flannel/releases/download/v0.24.0/kube-flannel.yml
    # Control plane node isolation
    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    KUBECONFIG=/etc/kubernetes/admin.conf
    mkdir -p ${HOME:-/root}/.kube
    cp -f $KUBECONFIG ${HOME:-/root}/.kube/config
    mkdir -p {{.Home}}/.kube
    cp -f $KUBECONFIG {{.Home}}/.kube/config
    chown -R {{.User}} {{.Home}}/.kube
probes:
- description: kubeadm installed
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 30s bash -c "until command -v kubeadm >/dev/null 2>&1; do sleep 1; done"; then
      echo >&2 "kubeadm is not installed yet"
      exit 1
    fi
  hint: |
    See "/var/log/cloud-init-output.log" in the guest.
- description: kubeadm completed
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until test -f /etc/kubernetes/admin.conf; do sleep 1; done"; then
      echo >&2 "k8s is not running yet"
      exit 1
    fi
  hint: |
    The k8s kubeconfig file has not yet been created.
- description: kubernetes cluster is ready
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until kubectl get --raw /readyz >/dev/null 2>&1; do sleep 1; done"; then
      echo >&2 "kubernetes cluster is not ready yet"
      exit 1
    fi
copyToHost:
- guest: /etc/kubernetes/admin.conf
  host: '{{.Dir}}/copied-from-guest/kubeconfig.yaml'
  deleteOnStop: true
vmType: vz
networks:
- socket: /var/run/socket_vmnet
cpus: 2
memory: 3g
disk: 20g
additionalDisks: []
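If it helps triage, this is how I would try to reproduce it without the rest of the drenv template; a minimal sketch (the file and instance names are mine, the images are the same ones as in the YAML above):

```bash
# Stripped-down template: just the images plus the portForwards ignore rule.
cat > portfwd-test.yaml <<'EOF'
images:
- location: https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-arm64.img
  arch: aarch64
- location: https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img
  arch: x86_64
portForwards:
- ignore: true
  proto: any
  guestIP: 0.0.0.0
EOF
limactl start --name=portfwd-test portfwd-test.yaml
# Start something listening in the guest, then check whether the hostagent
# forwards it even though the ignore rule matches everything.
limactl shell portfwd-test -- python3 -m http.server 8080 &
sleep 5
# With the ignore rule honored (as in 1.2.1), no guest service ports should
# be forwarded; if the regression is general, port 8080 should show up here.
grep 'Forwarding TCP' ~/.lima/portfwd-test/ha.stderr.log
```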