Remove hubble-cli sub-chart #11806

Merged 2 commits on Jun 2, 2020
10 changes: 0 additions & 10 deletions install/kubernetes/cilium/charts/hubble-cli/Chart.yaml

This file was deleted.

16 changes: 0 additions & 16 deletions install/kubernetes/cilium/charts/hubble-cli/values.yaml

This file was deleted.

3 changes: 0 additions & 3 deletions install/kubernetes/cilium/requirements.yaml
@@ -5,9 +5,6 @@ dependencies:
 - name: config
   version: 1.8.90
   condition: config.enabled
-- name: hubble-cli
-  version: 1.8.90
-  condition: global.hubble.cli.enabled
 - name: hubble-relay
   version: 1.8.90
   condition: global.hubble.relay.enabled
2 changes: 0 additions & 2 deletions install/kubernetes/cilium/templates/NOTES.txt
@@ -7,8 +7,6 @@
 You have preserved the configMap and successfully installed {{ title .Chart.Name}}.
 {{- else if (and (.Values.global.hubble.enabled) (.Values.global.hubble.ui.enabled)) }}
 You have successfully installed Cilium with hubble-ui.
-{{- else if (and (.Values.global.hubble.enabled) (.Values.global.hubble.cli.enabled) (.Values.global.hubble.ui.enabled)) }}
-You have successfully installed Cilium with hubble-ui and hubble-cli.
 {{- else }}
 You have successfully installed {{ title .Chart.Name }}.
 {{- end }}
4 changes: 1 addition & 3 deletions install/kubernetes/cilium/values.yaml
@@ -485,9 +485,7 @@ global:
       - drop
     serviceMonitor:
       enabled: false
-    # Configures the hubble-cli subchart
-    cli:
-      enabled: false
+
     # Configures the hubble-relay subchart
     relay:
       enabled: false
3 changes: 0 additions & 3 deletions test/helpers/cons.go
@@ -74,9 +74,6 @@ const (
 	// CiliumOperatorLabel is the label used in the Cilium Operator deployment
 	CiliumOperatorLabel = "io.cilium/app=operator"
 
-	// HubbleClientLabel is the label used for Hubble client pods
-	HubbleClientLabel = "k8s-app=hubble-cli"
-
 	// HubbleRelayLabel is the label used for the Hubble Relay deployment
 	HubbleRelayLabel = "k8s-app=hubble-relay"
 
32 changes: 0 additions & 32 deletions test/helpers/kubectl.go
@@ -2260,12 +2260,6 @@ func (kub *Kubectl) DeleteCiliumDS() error {
 	return kub.waitToDelete("Cilium", CiliumAgentLabel)
 }
 
-func (kub *Kubectl) DeleteHubbleClientPods(ns string) error {
-	ginkgoext.By("DeleteHubbleClientPods(namespace=%q)", ns)
-	_ = kub.DeleteResource("ds", fmt.Sprintf("-n %s hubble-cli", ns))
-	return kub.waitToDelete("HubbleClient", HubbleClientLabel)
-}
-
 func (kub *Kubectl) DeleteHubbleRelay(ns string) error {
 	ginkgoext.By("DeleteHubbleRelay(namespace=%q)", ns)
 	_ = kub.DeleteResource("deployment", fmt.Sprintf("-n %s hubble-relay", ns))
@@ -3316,21 +3310,6 @@ func (kub *Kubectl) GetCiliumPodOnNode(namespace string, node string) (string, e
 	return res.Output().String(), nil
 }
 
-// GetCiliumPodOnNode returns the name of the Hubble client pod that is running
-// on / in the specified node / namespace.
-func (kub *Kubectl) GetHubbleClientPodOnNode(namespace string, node string) (string, error) {
-	filter := fmt.Sprintf(
-		"-o jsonpath='{.items[?(@.spec.nodeName == \"%s\")].metadata.name}'", node)
-
-	res := kub.ExecShort(fmt.Sprintf(
-		"%s -n %s get pods -l %s %s", KubectlCmd, namespace, HubbleClientLabel, filter))
-	if !res.WasSuccessful() {
-		return "", fmt.Errorf("Hubble pod not found on node '%s': %s", node, res.OutputPrettyPrint())
-	}
-
-	return res.Output().String(), nil
-}
-
 // GetNodeInfo provides the node name and IP address based on the label
 // (eg helpers.K8s1 or helpers.K8s2)
 func (kub *Kubectl) GetNodeInfo(label string) (nodeName, nodeIP string) {
@@ -3351,17 +3330,6 @@ func (kub *Kubectl) GetCiliumPodOnNodeWithLabel(namespace string, label string)
 	return kub.GetCiliumPodOnNode(namespace, node)
 }
 
-// GetHubbleClientPodOnNodeWithLabel returns the name of the Hubble client pod
-// that is running on node with cilium.io/ci-node label
-func (kub *Kubectl) GetHubbleClientPodOnNodeWithLabel(namespace string, label string) (string, error) {
-	node, err := kub.GetNodeNameByLabel(label)
-	if err != nil {
-		return "", fmt.Errorf("Unable to get nodes with label '%s': %s", label, err)
-	}
-
-	return kub.GetHubbleClientPodOnNode(namespace, node)
-}
-
 func (kub *Kubectl) validateCilium() error {
 	var g errgroup.Group
 
12 changes: 2 additions & 10 deletions test/k8sT/Policies.go
@@ -98,9 +98,7 @@ var _ = Describe("K8sPolicyTest", func() {
 		daemonCfg = map[string]string{
 			"global.tls.secretsBackend": "k8s",
 			"global.debug.verbose": "flow",
-			// enable hubble server and the CLI client
 			"global.hubble.enabled": "true",
-			"global.hubble.cli.enabled": "true",
 		}
 		DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, daemonCfg)
 	})
@@ -966,15 +964,10 @@
 
 		// curl commands are issued from the first k8s worker where all
 		// the app instances are running
-		hubblePod1, err := kubectl.GetHubbleClientPodOnNodeWithLabel(
-			helpers.CiliumNamespace, helpers.K8s1,
-		)
-		Expect(err).Should(BeNil(), "unable to find hubble-cli pod on %s", helpers.K8s1)
-
 		By("Starting hubble observe and generating traffic which should%s redirect to proxy", not)
 		ctx, cancel := context.WithCancel(context.Background())
 		hubbleRes := kubectl.HubbleObserveFollow(
-			ctx, helpers.CiliumNamespace, hubblePod1,
+			ctx, helpers.CiliumNamespace, ciliumPod,
 			// since 0s is important here so no historic events from the
 			// buffer are shown, only follow from the current time
 			"--type l7 --since 0s",
@@ -997,7 +990,7 @@
 		res.ExpectSuccess("%q cannot curl %q", appPods[helpers.App2], resource)
 
 		By("Checking that aforementioned traffic was%sredirected to the proxy", not)
-		err = hubbleRes.WaitUntilMatchFilterLineTimeout(filter, expect, hubbleTimeout)
+		err := hubbleRes.WaitUntilMatchFilterLineTimeout(filter, expect, hubbleTimeout)
 		if redirected {
 			ExpectWithOffset(1, err).To(BeNil(), "traffic was not redirected to the proxy when it should have been")
 		} else {
@@ -1104,7 +1097,6 @@
 		// avoid incomplete teardown if any step fails.
 		_ = kubectl.Delete(demoYAML)
 		ExpectAllPodsTerminated(kubectl)
-		kubectl.DeleteHubbleClientPods(helpers.CiliumNamespace)
 	})
 
 	AfterEach(func() {
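
Note: with the hubble-cli daemonset removed, the tests exec the hubble CLI inside the cilium agent pod itself (ciliumPod above replaces the old hubblePod1). The sketch below illustrates the idea behind a follow-style observe helper; it is hypothetical, not the real HubbleObserveFollow from test/helpers, and it assumes the hubble binary is bundled in the agent image:

// Hypothetical sketch, not the real test/helpers implementation: follow
// hubble flows by exec'ing the CLI that is assumed to ship inside the
// cilium agent image (the reason a separate hubble-cli chart is unneeded).
package sketch

import (
	"context"
	"fmt"
	"os/exec"
)

// hubbleObserveFollow starts `hubble observe --follow` inside the given
// cilium agent pod and returns the running command so the caller can
// stream and match its output, much as the tests do with CmdRes.
func hubbleObserveFollow(ctx context.Context, namespace, ciliumPod, args string) (*exec.Cmd, error) {
	cmd := exec.CommandContext(ctx, "kubectl", "exec", "-n", namespace, ciliumPod, "--",
		"sh", "-c", fmt.Sprintf("hubble observe --follow %s", args))
	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("starting hubble observe in %s/%s: %w", namespace, ciliumPod, err)
	}
	return cmd, nil
}
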
40 changes: 20 additions & 20 deletions test/k8sT/hubble.go
@@ -35,12 +35,12 @@ var _ = Describe("K8sHubbleTest", func() {
 	var (
 		kubectl *helpers.Kubectl
 		ciliumFilename string
+		k8s1NodeName string
+		ciliumPodK8s1 string
 
-		hubblePodK8s1 string
-
-		hubbleNamespace = helpers.CiliumNamespace
-		hubbleRelayService = "hubble-relay"
-		hubbleRelayAddress string
+		hubbleRelayNamespace = helpers.CiliumNamespace
+		hubbleRelayService = "hubble-relay"
+		hubbleRelayAddress string
 
 		demoPath string
 
@@ -108,23 +108,22 @@
 	BeforeAll(func() {
 		kubectl = helpers.CreateKubectl(helpers.K8s1VMName(), logger)
 		ciliumFilename = helpers.TimestampFilename("cilium.yaml")
+		k8s1NodeName, _ = kubectl.GetNodeInfo(helpers.K8s1)
 
 		demoPath = helpers.ManifestGet(kubectl.BasePath(), "demo.yaml")
 
 		DeployCiliumOptionsAndDNS(kubectl, ciliumFilename, map[string]string{
 			"global.hubble.metricsServer": fmt.Sprintf(":%s", prometheusPort),
 			"global.hubble.metrics.enabled": `"{dns:query;ignoreAAAA,drop,tcp,flow,port-distribution,icmp,http}"`,
-			"global.hubble.cli.enabled": "true",
 			"global.hubble.relay.enabled": "true",
 		})
 
 		var err error
-		ExpectHubbleCLIReady(kubectl, hubbleNamespace)
-		hubblePodK8s1, err = kubectl.GetHubbleClientPodOnNodeWithLabel(hubbleNamespace, helpers.K8s1)
+		ciliumPodK8s1, err = kubectl.GetCiliumPodOnNodeWithLabel(helpers.CiliumNamespace, helpers.K8s1)
 		Expect(err).Should(BeNil(), "unable to find hubble-cli pod on %s", helpers.K8s1)
 
-		ExpectHubbleRelayReady(kubectl, hubbleNamespace)
-		hubbleRelayIP, hubbleRelayPort, err := kubectl.GetServiceHostPort(hubbleNamespace, hubbleRelayService)
+		ExpectHubbleRelayReady(kubectl, hubbleRelayNamespace)
+		hubbleRelayIP, hubbleRelayPort, err := kubectl.GetServiceHostPort(hubbleRelayNamespace, hubbleRelayService)
 		Expect(err).Should(BeNil(), "Cannot get service %s", hubbleRelayService)
 		Expect(govalidator.IsIP(hubbleRelayIP)).Should(BeTrue(), "hubbleRelayIP is not an IP")
 		hubbleRelayAddress = net.JoinHostPort(hubbleRelayIP, strconv.Itoa(hubbleRelayPort))
@@ -144,8 +143,7 @@
 	})
 
 	AfterAll(func() {
-		kubectl.DeleteHubbleClientPods(hubbleNamespace)
-		kubectl.DeleteHubbleRelay(hubbleNamespace)
+		kubectl.DeleteHubbleRelay(hubbleRelayNamespace)
 		kubectl.CloseSSHClient()
 	})
 
@@ -182,7 +180,7 @@
 	It("Test L3/L4 Flow", func() {
 		ctx, cancel := context.WithTimeout(context.Background(), helpers.MidCommandTimeout)
 		defer cancel()
-		follow := kubectl.HubbleObserveFollow(ctx, hubbleNamespace, hubblePodK8s1, fmt.Sprintf(
+		follow := kubectl.HubbleObserveFollow(ctx, helpers.CiliumNamespace, ciliumPodK8s1, fmt.Sprintf(
 			"--last 1 --type trace --from-pod %s/%s --to-namespace %s --to-label %s --to-port %d",
 			namespaceForTest, appPods[helpers.App2], namespaceForTest, app1Labels, app1Port))
 
@@ -196,8 +194,9 @@
 		// Basic check for L4 Prometheus metrics.
 		_, nodeIP := kubectl.GetNodeInfo(helpers.K8s1)
 		metricsUrl := fmt.Sprintf("%s/metrics", net.JoinHostPort(nodeIP, prometheusPort))
-		res = kubectl.ExecPodCmd(hubbleNamespace, hubblePodK8s1, helpers.CurlFail(metricsUrl))
-		res.ExpectSuccess("%s/%s cannot curl metrics %q", hubblePodK8s1, hubblePodK8s1, app1ClusterIP)
+		res, err = kubectl.ExecInHostNetNS(ctx, k8s1NodeName, helpers.CurlFail(metricsUrl))
+		Expect(err).To(BeNil(), "failed to execute curl on node %q", k8s1NodeName)
+		res.ExpectSuccess("%s/%s cannot curl metrics %q", helpers.CiliumNamespace, ciliumPodK8s1, app1ClusterIP)
 		res.ExpectContains(`hubble_flows_processed_total{subtype="to-endpoint",type="Trace",verdict="FORWARDED"}`)
 	})
 
@@ -209,7 +208,7 @@
 		// In case a node was temporarily unavailable, hubble-relay will
 		// reconnect once it receives a new request. Therefore we retry
 		// in a 5 second interval.
-		hubbleObserveUntilMatch(hubbleNamespace, hubblePodK8s1, fmt.Sprintf(
+		hubbleObserveUntilMatch(helpers.CiliumNamespace, ciliumPodK8s1, fmt.Sprintf(
 			"--server %s --last 1 --type trace --from-pod %s/%s --to-namespace %s --to-label %s --to-port %d",
 			hubbleRelayAddress, namespaceForTest, appPods[helpers.App2], namespaceForTest, app1Labels, app1Port),
 			`{$.Type}`, "L3_L4",
@@ -226,7 +225,7 @@
 
 		ctx, cancel := context.WithTimeout(context.Background(), helpers.MidCommandTimeout)
 		defer cancel()
-		follow := kubectl.HubbleObserveFollow(ctx, hubbleNamespace, hubblePodK8s1, fmt.Sprintf(
+		follow := kubectl.HubbleObserveFollow(ctx, helpers.CiliumNamespace, ciliumPodK8s1, fmt.Sprintf(
 			"--last 1 --type l7 --from-pod %s/%s --to-namespace %s --to-label %s --protocol http",
 			namespaceForTest, appPods[helpers.App2], namespaceForTest, app1Labels))
 
@@ -240,8 +239,9 @@
 		// Basic check for L7 Prometheus metrics.
 		_, nodeIP := kubectl.GetNodeInfo(helpers.K8s1)
 		metricsUrl := fmt.Sprintf("%s/metrics", net.JoinHostPort(nodeIP, prometheusPort))
-		res = kubectl.ExecPodCmd(hubbleNamespace, hubblePodK8s1, helpers.CurlFail(metricsUrl))
-		res.ExpectSuccess("%s/%s cannot curl metrics %q", hubbleNamespace, hubblePodK8s1, app1ClusterIP)
+		res, err = kubectl.ExecInHostNetNS(ctx, k8s1NodeName, helpers.CurlFail(metricsUrl))
+		Expect(err).To(BeNil(), "failed to execute curl on node %q", k8s1NodeName)
+		res.ExpectSuccess("%s/%s cannot curl metrics %q", helpers.CiliumNamespace, ciliumPodK8s1, app1ClusterIP)
 		res.ExpectContains(`hubble_flows_processed_total{subtype="HTTP",type="L7",verdict="FORWARDED"}`)
 	})
 
@@ -256,7 +256,7 @@
 		// In case a node was temporarily unavailable, hubble-relay will
 		// reconnect once it receives a new request. Therefore we retry
 		// in a 5 second interval.
-		hubbleObserveUntilMatch(hubbleNamespace, hubblePodK8s1, fmt.Sprintf(
+		hubbleObserveUntilMatch(helpers.CiliumNamespace, ciliumPodK8s1, fmt.Sprintf(
 			"--server %s --last 1 --type l7 --from-pod %s/%s --to-namespace %s --to-label %s --protocol http",
 			hubbleRelayAddress, namespaceForTest, appPods[helpers.App2], namespaceForTest, app1Labels),
 			`{$.Type}`, "L7",
Expand Down