diff --git a/build/tools/kube-terminal-dev.sh b/build/tools/kube-terminal-dev.sh
new file mode 100755
index 0000000000..ce1ab1549e
--- /dev/null
+++ b/build/tools/kube-terminal-dev.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+# Colours
+CYAN="\033[96m"
+YELLOW="\033[93m"
+RED="\033[91m"
+RESET="\033[0m"
+BOLD="\033[1m"
+
+# Program Paths:
+PROG=$(basename ${BASH_SOURCE[0]})
+PROG_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+STRATOS_DIR="$( cd "${PROG_DIR}/../.." && pwd )"
+
+echo "Creating Service Account"
+SRC="${STRATOS_DIR}/deploy/kubernetes/console/templates/service-account.yaml"
+
+# Strip the Helm templating from the service account template so it can be applied directly
+TEMPFILE=$(mktemp)
+cp $SRC $TEMPFILE
+sed -i.bak '/\s*helm/d' $TEMPFILE
+sed -i.bak '/\s*app\.kubernetes\.io\/version/d' $TEMPFILE
+sed -i.bak '/\s*app\.kubernetes\.io\/instance/d' $TEMPFILE
+sed -i.bak '/\s*{{-/d' $TEMPFILE
+
+# Create a namespace
+NS="stratos-dev"
+kubectl get ns $NS > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+  kubectl create ns $NS
+fi
+
+kubectl apply -n $NS -f $TEMPFILE
+USER=stratos
+
+# Service account should be created - now need to get its token
+SECRET=$(kubectl get -n $NS sa $USER -o json | jq -r '.secrets[0].name')
+TOKEN=$(kubectl get -n $NS secret $SECRET -o json | jq -r '.data.token')
+echo "Token secret: $SECRET"
+TOKEN=$(echo $TOKEN | base64 -d -)
+echo "Token $TOKEN"
+
+rm -f $TEMPFILE
+rm -f $TEMPFILE.bak
+
+CFG=${STRATOS_DIR}/src/jetstream/config.properties
+touch $CFG
+
+echo -e "\n# Kubernetes Terminal Config for dev" >> $CFG
+echo "STRATOS_KUBERNETES_NAMESPACE=stratos-dev" >> $CFG
+echo "STRATOS_KUBERNETES_TERMINAL_IMAGE=splatform/stratos-kube-terminal:dev" >> $CFG
+echo "KUBE_TERMINAL_SERVICE_ACCOUNT_TOKEN=$TOKEN" >> $CFG
+
+MKUBE=$(minikube ip)
+if [ $? -eq 0 ]; then
+  echo "KUBERNETES_SERVICE_HOST=$MKUBE" >> $CFG
+  echo "KUBERNETES_SERVICE_PORT=8443" >> $CFG
+else
+  echo "KUBERNETES_SERVICE_HOST=" >> $CFG
+  echo "KUBERNETES_SERVICE_PORT=8443" >> $CFG
+fi
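A quick way to sanity-check the token and RBAC rules the script sets up is to hit the API server directly. This is a minimal sketch, not part of this change; it reads the env vars the script writes and assumes a minikube-style API server with a self-signed certificate (hence `InsecureSkipVerify`, dev only):

```go
// token_check.go - illustrative only; verifies the service-account token
// written to config.properties can list pods in the stratos-dev namespace.
package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	host := os.Getenv("KUBERNETES_SERVICE_HOST")
	port := os.Getenv("KUBERNETES_SERVICE_PORT")
	token := os.Getenv("KUBE_TERMINAL_SERVICE_ACCOUNT_TOKEN")

	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // dev only
	}}

	// Listing pods exercises the RBAC rules the chart grants to the account
	url := fmt.Sprintf("https://%s:%s/api/v1/namespaces/stratos-dev/pods", host, port)
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```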
diff --git a/custom-src/deploy/kubernetes/__stratos.tpl b/custom-src/deploy/kubernetes/__stratos.tpl
index 3ebbce1efe..4999b0e2cb 100644
--- a/custom-src/deploy/kubernetes/__stratos.tpl
+++ b/custom-src/deploy/kubernetes/__stratos.tpl
@@ -12,4 +12,8 @@
   value: "mongodb://{{ .Release.Name }}-fdbdoclayer:27016"
 - name: SYNC_SERVER_URL
   value: "http://{{ .Release.Name }}-chartsync:8080"
+- name: STRATOS_KUBERNETES_NAMESPACE
+  value: "{{ .Release.Namespace }}"
+- name: STRATOS_KUBERNETES_TERMINAL_IMAGE
+  value: "{{.Values.kube.registry.hostname}}/{{.Values.kube.organization}}/stratos-kube-terminal:{{.Values.consoleVersion}}"
 {{- end }}
\ No newline at end of file
diff --git a/custom-src/deploy/kubernetes/custom-build.sh b/custom-src/deploy/kubernetes/custom-build.sh
index b850a85563..35d6202755 100644
--- a/custom-src/deploy/kubernetes/custom-build.sh
+++ b/custom-src/deploy/kubernetes/custom-build.sh
@@ -21,4 +21,8 @@ function custom_image_build() {
   # Build and push an image for the Helm Repo Sync Tool
   log "-- Building/publishing Monocular Chart Repo Sync Tool"
   patchAndPushImage stratos-chartsync Dockerfile "${STRATOS_PATH}/src/jetstream/plugins/monocular/chart-repo"
+
+  # Build and push an image for the Kubernetes Terminal
+  log "-- Building/publishing Kubernetes Terminal"
+  patchAndPushImage stratos-kube-terminal Dockerfile.kubeterminal "${STRATOS_PATH}/deploy/containers/kube-terminal"
 }
\ No newline at end of file
diff --git a/custom-src/deploy/kubernetes/imagelist.txt b/custom-src/deploy/kubernetes/imagelist.txt
new file mode 100644
index 0000000000..e628bfa984
--- /dev/null
+++ b/custom-src/deploy/kubernetes/imagelist.txt
@@ -0,0 +1 @@
+stratos-kube-terminal:_VERSION_
\ No newline at end of file
diff --git a/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.html b/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.html
new file mode 100644
index 0000000000..905abb2f62
--- /dev/null
+++ b/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.html
@@ -0,0 +1,18 @@
[The 18-line Angular template was lost in extraction (its markup was stripped). What remains recoverable: a page header titled "Kubernetes Terminal" with the endpoint breadcrumbs, wrapping an app-ssh-viewer element that the component class below binds to (messages, sshInput, connectionStatus, errorMessage).]
diff --git a/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.scss b/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.scss
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.spec.ts b/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.spec.ts
new file mode 100644
index 0000000000..8671538bc9
--- /dev/null
+++ b/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.spec.ts
@@ -0,0 +1,42 @@
+import { async, ComponentFixture, TestBed } from '@angular/core/testing';
+import { RouterTestingModule } from '@angular/router/testing';
+import { createBasicStoreModule } from '@stratosui/store/testing';
+
+import { ApplicationService } from '../../../../../cloud-foundry/src/features/applications/application.service';
+import { ApplicationServiceMock } from '../../../../../cloud-foundry/test-framework/application-service-helper';
+import { TabNavService } from '../../../../tab-nav.service';
+import { CoreModule } from '../../../core/core.module';
+import { SharedModule } from '../../../shared/shared.module';
+import { KubeConsoleComponent } from './kube-console.component';
+
+describe('KubeConsoleComponent', () => {
+  let component: KubeConsoleComponent;
+  let fixture: ComponentFixture<KubeConsoleComponent>;
+
+  beforeEach(async(() => {
+    TestBed.configureTestingModule({
+      declarations: [KubeConsoleComponent],
+      imports: [
+        CoreModule,
+        SharedModule,
+        RouterTestingModule,
+        createBasicStoreModule()
+      ],
+      providers: [
+        { provide: ApplicationService, useClass: ApplicationServiceMock },
+        TabNavService
+      ],
+    })
+      .compileComponents();
+  }));
+
+  beforeEach(() => {
+    fixture = TestBed.createComponent(KubeConsoleComponent);
+    component = fixture.componentInstance;
+    fixture.detectChanges();
+  });
+
+  it('should create', () => {
+    expect(component).toBeTruthy();
+  });
+});
diff --git a/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.ts b/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.ts
new file mode 100644
index 0000000000..643fd02271
--- /dev/null
+++ b/custom-src/frontend/app/custom/kubernetes/kube-terminal/kube-console.component.ts
@@ -0,0 +1,95 @@
+import { Component, OnInit, ViewChild } from '@angular/core';
+import { ActivatedRoute } from '@angular/router';
+import { NEVER, Observable, Subject } from 'rxjs';
+import websocketConnect, { normalClosureMessage } from 'rxjs-websockets';
+import { catchError, map, switchMap, tap } from 'rxjs/operators';
+
+import { IHeaderBreadcrumb } from '../../../shared/components/page-header/page-header.types';
+import { SshViewerComponent } from '../../../shared/components/ssh-viewer/ssh-viewer.component';
+import { BaseKubeGuid } from '../kubernetes-page.types';
+import { KubernetesEndpointService } from '../services/kubernetes-endpoint.service';
+import { KubernetesService } from '../services/kubernetes.service';
+
+@Component({
+  selector: 'app-kube-console',
+  templateUrl: './kube-console.component.html',
+  styleUrls: ['./kube-console.component.scss'],
+  providers: [
+    {
+      provide: BaseKubeGuid,
+      useFactory: (activatedRoute: ActivatedRoute) => {
+        return {
+          guid: activatedRoute.snapshot.params.endpointId
+        };
+      },
+      deps: [
+        ActivatedRoute
+      ]
+    },
+    KubernetesService,
+    KubernetesEndpointService,
+  ]
+})
+export class KubeConsoleComponent implements OnInit {
+
+  public messages: Observable<string>;
+
+  public connectionStatus = new Subject<number>();
+
+  public sshInput: Subject<string>;
+
+  public errorMessage: string;
+
+  public connected: boolean;
+
+  public kubeSummaryLink: string;
+
+  public breadcrumbs$: Observable<IHeaderBreadcrumb[]>;
+
+  @ViewChild('sshViewer', { static: false }) sshViewer: SshViewerComponent;
+
+  constructor(
+    public kubeEndpointService: KubernetesEndpointService,
+  ) { }
+
+  ngOnInit() {
+    this.connectionStatus.next(0);
+    const guid = this.kubeEndpointService.baseKube.guid;
+    this.kubeSummaryLink = `/kubernetes/${guid}/summary`;
+
+    if (!guid) {
+      this.messages = NEVER;
+      this.connectionStatus.next(0);
+      this.errorMessage = 'No Endpoint ID available';
+    } else {
+      const host = window.location.host;
+      const protocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
+      const streamUrl = (
+        `${protocol}://${host}/pp/v1/kubeterminal/${guid}`
+      );
+      this.sshInput = new Subject<string>();
+      const connection = websocketConnect(streamUrl);
+
+      this.messages = connection.pipe(
+        tap(() => this.connectionStatus.next(1)),
+        switchMap(getResponse => getResponse(this.sshInput)),
+        catchError((e: Error) => {
+          if (e.message !== normalClosureMessage && !this.sshViewer.isConnected) {
+            this.errorMessage = 'Error launching Kubernetes Terminal';
+          }
+          return [];
+        }));
+
+      // Breadcrumbs
+      this.breadcrumbs$ = this.kubeEndpointService.endpoint$.pipe(
+        map(endpoint => ([{
+          breadcrumbs: [
+            { value: endpoint.entity.name, routerLink: `/kubernetes/${endpoint.entity.guid}` },
+          ]
+        }])
+        )
+      );
+    }
+  }
+}
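The component streams the terminal over a web socket at `/pp/v1/kubeterminal/{guid}`, and each frame carries space-separated hex byte values (produced by `pumpStdout` later in this diff). A minimal Go client sketch that decodes that framing - illustrative only, not part of the change; a real connection would be rejected without a valid Stratos session cookie and TLS setup:

```go
// kubeterminal_client.go - documents the wire format of the terminal socket.
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/gorilla/websocket"
)

// decodeFrame turns "48 65 6c 6c 6f\n" back into "Hello".
func decodeFrame(frame string) string {
	var out []byte
	for _, tok := range strings.Fields(frame) {
		if b, err := strconv.ParseUint(tok, 16, 8); err == nil {
			out = append(out, byte(b))
		}
	}
	return string(out)
}

func main() {
	guid := "your-endpoint-guid" // hypothetical
	url := fmt.Sprintf("wss://localhost:5443/pp/v1/kubeterminal/%s", guid)
	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		fmt.Print(decodeFrame(string(msg)))
	}
}
```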
diff --git a/custom-src/frontend/app/custom/kubernetes/kubernetes.module.ts b/custom-src/frontend/app/custom/kubernetes/kubernetes.module.ts
index 0c2111ed22..57a9ff32eb 100644
--- a/custom-src/frontend/app/custom/kubernetes/kubernetes.module.ts
+++ b/custom-src/frontend/app/custom/kubernetes/kubernetes.module.ts
@@ -91,6 +91,7 @@ import { KubernetesNamespacesTabComponent } from './tabs/kubernetes-namespaces-t
 import { KubernetesNodesTabComponent } from './tabs/kubernetes-nodes-tab/kubernetes-nodes-tab.component';
 import { KubernetesPodsTabComponent } from './tabs/kubernetes-pods-tab/kubernetes-pods-tab.component';
 import { KubernetesSummaryTabComponent } from './tabs/kubernetes-summary-tab/kubernetes-summary.component';
+import { KubeConsoleComponent } from './kube-terminal/kube-console.component';
 
 /* tslint:disable:max-line-length */
@@ -142,6 +143,7 @@ import { KubernetesSummaryTabComponent } from './tabs/kubernetes-summary-tab/kub
     NodePodCountComponent,
     KubernetesServicePortsComponent,
     KubernetesPodStatusComponent,
+    KubeConsoleComponent,
     KubeServiceCardComponent,
     KubernetesResourceViewerComponent,
     KubeServiceCardComponent,
diff --git a/custom-src/frontend/app/custom/kubernetes/kubernetes.routing.ts b/custom-src/frontend/app/custom/kubernetes/kubernetes.routing.ts
index da6956e8a5..c7681161ce 100644
--- a/custom-src/frontend/app/custom/kubernetes/kubernetes.routing.ts
+++ b/custom-src/frontend/app/custom/kubernetes/kubernetes.routing.ts
@@ -23,6 +23,7 @@ import { KubernetesNodesTabComponent } from './tabs/kuberne
 import { KubernetesPodsTabComponent } from './tabs/kubernetes-pods-tab/kubernetes-pods-tab.component';
 import { KubernetesSummaryTabComponent } from './tabs/kubernetes-summary-tab/kubernetes-summary.component';
 import { KubedashConfigurationComponent } from './kubernetes-dashboard/kubedash-configuration/kubedash-configuration.component';
+import { KubeConsoleComponent } from './kube-terminal/kube-console.component';
 
 const kubernetes: Routes = [{
   path: '',
@@ -129,6 +130,13 @@ const kubernetes: Routes = [{
 {
   path: ':endpointId/dashboard-config',
   component: KubedashConfigurationComponent,
+},
+{
+  path: ':endpointId/terminal',
+  component: KubeConsoleComponent,
+  data: {
+    uiNoMargin: true
+  }
 }
 ];
diff --git a/custom-src/frontend/app/custom/kubernetes/services/kubernetes-endpoint.service.ts b/custom-src/frontend/app/custom/kubernetes/services/kubernetes-endpoint.service.ts
index 7bafc4d2ca..8b8927e376 100644
--- a/custom-src/frontend/app/custom/kubernetes/services/kubernetes-endpoint.service.ts
+++ b/custom-src/frontend/app/custom/kubernetes/services/kubernetes-endpoint.service.ts
@@ -40,6 +40,7 @@ export class KubernetesEndpointService {
   kubeDashboardStatus$: Observable<KubeDashboardStatus>;
   kubeDashboardLabel$: Observable<string>;
   kubeDashboardConfigured$: Observable<boolean>;
+  kubeTerminalEnabled$: Observable<boolean>;
 
   constructor(
     public baseKube: BaseKubeGuid,
@@ -158,6 +159,11 @@ export class KubernetesEndpointService {
       map(auth => auth.sessionData['plugin-config'].kubeDashboardEnabled === 'true')
     );
 
+    this.kubeTerminalEnabled$ = this.store.select('auth').pipe(
+      filter(auth => !!auth.sessionData['plugin-config']),
+      map(auth => auth.sessionData['plugin-config'].kubeTerminalEnabled === 'true')
+    );
+
     const kubeDashboardStatus$ = kubeEntityCatalog.dashboard.store.getEntityService(this.kubeGuid).waitForEntity$.pipe(
       map(status => status.entity),
       filter(status => !!status)
diff --git a/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.html b/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.html
index a690311532..4f3165dbe8 100644
--- a/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.html
+++ b/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.html
@@ -4,6 +4,10 @@
[Template markup lost in extraction. Recoverable context: the existing "View Dashboard" button (mat-icon "dashboard"); the four added lines apparently add a companion link for the Kubernetes Terminal, routed to kubeTerminalLink and gated on kubeTerminalEnabled$.]
diff --git a/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.ts b/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.ts
index fc4b0a9ce5..dbd6642698 100644
--- a/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.ts
+++ b/custom-src/frontend/app/custom/kubernetes/tabs/kubernetes-summary-tab/kubernetes-summary.component.ts
@@ -64,6 +64,8 @@ export class KubernetesSummaryTabComponent implements OnInit, OnDestroy {
   source: SafeResourceUrl;
   dashboardLink: string;
 
+  kubeTerminalLink: string;
+
   public podCapacity$: Observable<ISimpleUsageChartData>;
   public diskPressure$: Observable<ISimpleUsageChartData>;
   public memoryPressure$: Observable<ISimpleUsageChartData>;
@@ -159,6 +161,7 @@ export class KubernetesSummaryTabComponent implements OnInit, OnDestroy {
       warningText: `Nodes with unknown ready status found`
     });
     this.dashboardLink = `/kubernetes/${guid}/dashboard`;
+    this.kubeTerminalLink = `/kubernetes/${guid}/terminal`;
 
     this.kubeNodeVersions$ = this.kubeEndpointService.getNodeKubeVersions(nodes$).pipe(startWith('-'));
diff --git a/deploy/ci/build-aio-image-canary.yml b/deploy/ci/build-aio-image-canary.yml
index cc19412f7e..5e64ab6bcf 100644
--- a/deploy/ci/build-aio-image-canary.yml
+++ b/deploy/ci/build-aio-image-canary.yml
@@ -73,6 +73,7 @@ jobs:
         tag: stratos/deploy/ci/tasks/build-images/canary-tag
         tag_as_latest: false
         labels_file: image-tag/image-labels
+        squash: true
         build_args_file: image-tag/ui-build-args
         build_args:
           CANARY_BUILD: true
diff --git a/deploy/ci/suse-console-dev-releases.yml b/deploy/ci/suse-console-dev-releases.yml
index 82b15b962c..ef2247a720 100644
--- a/deploy/ci/suse-console-dev-releases.yml
+++ b/deploy/ci/suse-console-dev-releases.yml
@@ -65,7 +65,13 @@ resources:
     username: ((docker-username))
     password: ((docker-password))
     repository: ((docker-repository))/stratos-chartsync
-
+- name: kube-terminal-image
+  type: docker-image
+  source:
+    username: ((docker-username))
+    password: ((docker-password))
+    repository: ((docker-repository))/stratos-kube-terminal
+
 # Artifacts
 - name: image-tag
   type: s3
@@ -147,6 +153,13 @@ jobs:
       tag: image-tag/v2-alpha-tag
       patch_base_reg: ((patch-base-reg))
      patch_base_tag: ((patch-base-tag))
+  - put: kube-terminal-image
+    params:
+      dockerfile: stratos/deploy/containers/kube-terminal/Dockerfile.kubeterminal
+      build: stratos/deploy/containers/kube-terminal
+      tag: image-tag/v2-alpha-tag
+      patch_base_reg: ((patch-base-reg))
+      patch_base_tag: ((patch-base-tag))
   - do:
     - put: ui-image
       params:
diff --git a/deploy/common-build.sh b/deploy/common-build.sh
index 2e8906adf4..913c159a17 100644
--- a/deploy/common-build.sh
+++ b/deploy/common-build.sh
@@ -43,7 +43,7 @@ function buildAndPublishImage {
 
 # Proxy support
 # Remove intermediate containers after a successful build
-BUILD_ARGS="--rm=true --squash"
+BUILD_ARGS="--rm=true"
 RUN_ARGS=""
 if [ -n "${http_proxy:-}" -o -n "${HTTP_PROXY:-}" ]; then
   BUILD_ARGS="${BUILD_ARGS} --build-arg http_proxy=${http_proxy:-${HTTP_PROXY}}"
@@ -54,6 +54,15 @@ if [ -n "${https_proxy:-}" -o -n "${HTTPS_PROXY:-}" ]; then
   RUN_ARGS="${RUN_ARGS} -e https_proxy=${https_proxy:-${HTTPS_PROXY}}"
 fi
 
+# Check if we can squash (--squash requires the Docker daemon to have experimental features enabled)
+CAN_SQUASH=$(docker info 2>&1 | grep "Experimental: true" -c | cat)
+if [ "${CAN_SQUASH}" == "1" ]; then
+  BUILD_ARGS="${BUILD_ARGS} --squash"
+  echo "Images will be squashed"
+else
+  echo "Images will NOT be squashed"
+fi
+
 # Use correct sed command for Mac
 SED="sed -r"
 unamestr=`uname`
@@ -104,7 +113,6 @@ function cleanup {
   echo "-- Cleaning up ${STRATOS_PATH}"
   rm -rf ${STRATOS_PATH}/dist
   rm -rf ${STRATOS_PATH}/node_modules
-  rm -rf ${STRATOS_PATH}/bower_components
   echo
   echo "-- Cleaning up ${STRATOS_PATH}/deploy/containers/nginx/dist"
   rm -rf ${STRATOS_PATH}/deploy/containers/nginx/dist
diff --git a/deploy/containers/kube-terminal/Dockerfile.kubeterminal b/deploy/containers/kube-terminal/Dockerfile.kubeterminal
new file mode 100644
index 0000000000..1872e5abb0
--- /dev/null
+++ b/deploy/containers/kube-terminal/Dockerfile.kubeterminal
@@ -0,0 +1,58 @@
+FROM splatform/stratos-bk-build-base:leap15_1 as terminal-builder
+USER root
+WORKDIR /root
+
+# Kubectl versions
+RUN curl -L -o kubectl_1.18 https://storage.googleapis.com/kubernetes-release/release/v1.18.2/bin/linux/amd64/kubectl
+RUN curl -L -o kubectl_1.17 https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl
+RUN curl -L -o kubectl_1.16 https://storage.googleapis.com/kubernetes-release/release/v1.16.9/bin/linux/amd64/kubectl
+RUN curl -L -o kubectl_1.15 https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/linux/amd64/kubectl
+RUN curl -L -o kubectl_1.14 https://storage.googleapis.com/kubernetes-release/release/v1.14.10/bin/linux/amd64/kubectl
+
+# Compress each one, to save space in the image
+RUN gzip kubectl_1.18
+RUN gzip kubectl_1.17
+RUN gzip kubectl_1.16
+RUN gzip kubectl_1.15
+RUN gzip kubectl_1.14
+
+# Fetch the Helm 3 package
+RUN curl -L -o helm.tar.gz https://get.helm.sh/helm-v3.1.2-linux-amd64.tar.gz && \
+    tar -xvf helm.tar.gz --strip-components=1 && \
+    gzip helm
+
+RUN ls -al
+
+# Use a small base image with very little in it
+FROM splatform/stratos-base:leap15_1
+
+# Use gzip from the builder image
+COPY --from=terminal-builder /usr/bin/gunzip /usr/bin/
+COPY --from=terminal-builder /usr/bin/gzip /usr/bin/
+
+RUN mkdir /stratos
+
+# Copy Helm and the various kubectl versions
+COPY --from=terminal-builder /root/helm.gz /stratos/helm.gz
+COPY --from=terminal-builder /root/kubectl* /stratos/
+
+# Run as user 'stratos'
+RUN useradd -ms /bin/bash stratos -K MAIL_DIR=/dev/null
+
+RUN chown -R stratos /stratos && \
+    chgrp -R users /stratos
+
+# Remove a few packages to reduce the attack surface
+RUN zypper rm -y diffutils shadow fillup openssl
+
+# Remove zypper itself
+RUN zypper rm -y dirmngr && \
+    rm -rf /usr/bin/rpm*
+
+USER stratos
+WORKDIR /home/stratos
+
+ADD ./kubeconsole.bashrc /home/stratos/.bashrc
+
+# Keep the container alive until it is terminated
+CMD exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
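The image bundles one gzipped `kubectl` per supported minor version, and the startup script that follows picks one: it defaults to the newest bundled binary and prefers an exact `K8S_VERSION` match when the backend supplies one. A small Go sketch of that selection logic (illustrative only; file names mirror the image contents, and the sort is lexicographic, matching the script's `sort -r`):

```go
// pick_kubectl.go - mirrors the kubectl selection in kubeconsole.bashrc.
package main

import (
	"fmt"
	"sort"
)

func pickKubectl(available []string, k8sVersion string) string {
	// Prefer an exact match for the cluster's major.minor version
	want := "kubectl_" + k8sVersion + ".gz"
	for _, f := range available {
		if f == want {
			return f
		}
	}
	// Otherwise default to the newest bundled version
	sort.Sort(sort.Reverse(sort.StringSlice(available)))
	return available[0]
}

func main() {
	files := []string{"kubectl_1.14.gz", "kubectl_1.15.gz", "kubectl_1.16.gz", "kubectl_1.17.gz", "kubectl_1.18.gz"}
	fmt.Println(pickKubectl(files, "1.16")) // kubectl_1.16.gz
	fmt.Println(pickKubectl(files, "1.19")) // kubectl_1.18.gz (fallback to newest)
}
```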
diff --git a/deploy/containers/kube-terminal/kubeconsole.bashrc b/deploy/containers/kube-terminal/kubeconsole.bashrc
new file mode 100644
index 0000000000..0a1edfdd56
--- /dev/null
+++ b/deploy/containers/kube-terminal/kubeconsole.bashrc
@@ -0,0 +1,77 @@
+
+CYAN="\033[96m"
+YELLOW="\033[93m"
+GREEN="\033[92m"
+RESET="\033[0m"
+BOLD="\033[1m"
+DIM="\033[2m"
+
+echo -e "${BOLD}${GREEN}SUSE Stratos Console${RESET}"
+echo ""
+echo -e "${CYAN}Kubernetes Terminal${RESET}"
+echo ""
+
+# Only do these on first run
+if [ ! -f "/stratos/.firstrun" ]; then
+  # Unpack the helm command
+  gunzip /stratos/helm.gz
+
+  # Need to choose the appropriate kubectl version
+  pushd /stratos > /dev/null
+  # Default to the newest version that we have
+  USE=$(ls kubectl_* | sort -r | head -n1)
+  popd > /dev/null
+
+  # If env var K8S_VERSION is set, then use it (major.minor only)
+  if [ -n "${K8S_VERSION}" ]; then
+    VERSION="kubectl_${K8S_VERSION}.gz"
+    if [ -f "/stratos/${VERSION}" ]; then
+      USE=${VERSION}
+    fi
+  fi
+
+  gunzip /stratos/${USE}
+  VER=${USE::-3}
+  mv /stratos/${VER} /stratos/kubectl
+  chmod +x /stratos/kubectl
+fi
+
+export PATH=/stratos:$PATH
+
+export KUBECONFIG=${HOME}/.stratos/kubeconfig
+export PS1="\033[92mstratos>\033[0m "
+alias k=kubectl
+
+# Helm shell completion
+source <(helm completion bash)
+
+#helm repo remove stable > /dev/null
+
+if [ ! -f "/stratos/.firstrun" ]; then
+  if [ -f "${HOME}/.stratos/helm-setup" ]; then
+    echo "Setting up Helm repositories ..."
+    source "${HOME}/.stratos/helm-setup" > /dev/null
+    helm repo update > /dev/null 2>&1
+    echo ""
+  fi
+
+  if [ -f "${HOME}/.stratos/history" ]; then
+    cat ${HOME}/.stratos/history > ${HOME}/.bash_history
+  fi
+fi
+
+# Make Bash append rather than overwrite the history on disk:
+shopt -s histappend
+# A new shell gets the history lines from all previous shells
+PROMPT_COMMAND='history -a'
+# Don't put duplicate lines in the history
+export HISTCONTROL=ignoredups
+
+touch "/stratos/.firstrun"
+
+# Remove any env vars matching KUBERNETES
+unset `compgen -A variable | grep KUBERNETES`
+
+echo
+echo -e "Ready - ${CYAN}kubectl${RESET} and ${CYAN}helm${RESET} commands are available"
+echo ""
diff --git a/deploy/kubernetes/build.sh b/deploy/kubernetes/build.sh
index 63cc4a0f63..1b19bf1408 100755
--- a/deploy/kubernetes/build.sh
+++ b/deploy/kubernetes/build.sh
@@ -169,11 +169,10 @@ function patchDockerfile {
   if [ "${DOCKER_REG_DEFAULTS}" == "false" ]; then
     sed -i.bak "s@splatform@${DOCKER_REGISTRY}/${DOCKER_ORG}@g" "${FOLDER}/${PATCHED_DOCKER_FILE}"
   fi
-  sed -i.bak "s/opensuse/${BASE_IMAGE_TAG}/g" "${FOLDER}/${PATCHED_DOCKER_FILE}"
+  sed -i.bak "s/leap15_1/${BASE_IMAGE_TAG}/g" "${FOLDER}/${PATCHED_DOCKER_FILE}"
   popd > /dev/null 2>&1
 }
 
-
 #
 # MAIN -------------------------------------------------------------------------------------------
 #
@@ -184,7 +183,9 @@ popd > /dev/null 2>&1
 echo "Base path: ${STRATOS_PATH}"
 
 # cleanup output, intermediate artifacts
-cleanup
+if [ "${CHART_ONLY}" == "false" ]; then
+  cleanup
+fi
 
 # Clean any old patched docker files left if previously errored
 # rm -rf ${STRATOS_PATH}/deploy/Dockerfile.*.patched
diff --git a/deploy/kubernetes/console/templates/deployment.yaml b/deploy/kubernetes/console/templates/deployment.yaml
index 60656b1d83..8dbef8dc25 100644
--- a/deploy/kubernetes/console/templates/deployment.yaml
+++ b/deploy/kubernetes/console/templates/deployment.yaml
@@ -81,6 +81,10 @@ spec:
           value: "{{.Values.consoleVersion}}:{{ .Release.Revision }}"
         - name: STRATOS_HELM_RELEASE
           value: "{{ .Release.Name }}"
+        - name: STRATOS_KUBERNETES_NAMESPACE
+          value: "{{ .Release.Namespace }}"
+        - name: STRATOS_KUBERNETES_TERMINAL_IMAGE
+          value: "{{.Values.kube.registry.hostname}}/{{.Values.kube.organization}}/stratos-kube-terminal:{{.Values.consoleVersion}}"
         - name: DB_USER
           valueFrom:
            secretKeyRef:
@@ -313,6 +317,9 @@ spec:
       imagePullSecrets:
      - name: {{.Values.dockerRegistrySecret}}
       {{- end }}
+      {{- if and (eq (printf "%s" .Values.kube.auth) "rbac") (.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1") }}
+      serviceAccountName: "stratos"
+      {{- end }}
       volumes:
       - name: "{{ .Release.Name }}-encryption-key-volume"
        secret:
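The two `STRATOS_KUBERNETES_*` values injected above are consumed by the backend's `KubeTerminal` struct through Jetstream's tag-driven `config.Load` (see terminal.go near the end of this diff). A stdlib-only sketch of the equivalent lookup - illustrative, not the actual loader:

```go
// envconfig_sketch.go - stand-in for jetstream's config.Load, which binds
// env vars to struct fields via `configName` tags.
package main

import (
	"fmt"
	"os"
)

type terminalConfig struct {
	Namespace string // bound from STRATOS_KUBERNETES_NAMESPACE
	Image     string // bound from STRATOS_KUBERNETES_TERMINAL_IMAGE
}

func load() (*terminalConfig, error) {
	cfg := &terminalConfig{}
	var ok bool
	if cfg.Namespace, ok = os.LookupEnv("STRATOS_KUBERNETES_NAMESPACE"); !ok {
		return nil, fmt.Errorf("STRATOS_KUBERNETES_NAMESPACE not set")
	}
	if cfg.Image, ok = os.LookupEnv("STRATOS_KUBERNETES_TERMINAL_IMAGE"); !ok {
		return nil, fmt.Errorf("STRATOS_KUBERNETES_TERMINAL_IMAGE not set")
	}
	return cfg, nil
}

func main() {
	cfg, err := load()
	if err != nil {
		// Mirrors the real behaviour: incomplete config disables the terminal
		fmt.Println("kube terminal disabled:", err)
		return
	}
	fmt.Printf("terminal pods: image=%s namespace=%s\n", cfg.Image, cfg.Namespace)
}
```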
+ - "secrets" + - "pods" + verbs: + - "create" + - "update" + - "get" + - "list" + - "delete" +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["create", "get"] +--- +# Role binding for service account "stratos" and role "stratos-role" +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "RoleBinding" +metadata: + name: "stratos-role-binding" + labels: + app.kubernetes.io/component: "stratos-role-binding" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/name: "stratos" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +subjects: +- kind: "ServiceAccount" + name: "stratos" +roleRef: + apiGroup: "rbac.authorization.k8s.io" + kind: "Role" + name: "stratos-role" +{{- end }} diff --git a/deploy/kubernetes/imagelist-gen.sh b/deploy/kubernetes/imagelist-gen.sh index 77db87f409..1605cdb54d 100755 --- a/deploy/kubernetes/imagelist-gen.sh +++ b/deploy/kubernetes/imagelist-gen.sh @@ -12,6 +12,22 @@ __DIRNAME="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" printf "${BOLD}${CYAN}Generating ${YELLOW}imagelist.txt${RESET}\n" echo "" +STRATOS_FOLDER="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd ../../ && pwd )" + +# Add any customizations +function addCustomizations() { + + if [ -f "${STRATOS_FOLDER}/custom-src/deploy/kubernetes/imagelist.txt" ];then + echo "Including custom imagelist contents" + cat "${STRATOS_FOLDER}/custom-src/deploy/kubernetes/imagelist.txt" >> ./imagelist.txt + + # Update version number + VERSION=$(grep -Po 'consoleVersion: \K(.*)' ./values.yaml) + echo "Image Version: ${VERSION}" + sed -i 's/_VERSION_/'"${VERSION}"'/g' imagelist.txt + fi +} + CHART_FOLDER=${1} @@ -41,6 +57,11 @@ if [ $? -ne 0 ]; then echo -e "${BOLD}${RED}ERROR: Failed to render Helm Chart in order to generate image list" exit 1 fi + +# Add any customizations to the image list +# Mainly used if there are unreferenced images that need to be included +addCustomizations + popd > /dev/null printf "${CYAN}" diff --git a/docs/suse/kube-terminal-dev.md b/docs/suse/kube-terminal-dev.md new file mode 100644 index 0000000000..fe552b064c --- /dev/null +++ b/docs/suse/kube-terminal-dev.md @@ -0,0 +1,21 @@ +# Enabling the Kubernetes Terminal in local development + +You need a Kubernetes cluster with `kubectl` set up and configured with the kubeconfig file. + +Run the script `build/tools/kube-terminal-dev.sh` + +This script will: + +- Create a service account named `stratos` +- Create a namespace named `stratos-dev` +- Write environment variables to the `src/jetstream/config.properties` file + +If you have minikube running, the configuration for your Kubernetes API Server will be set correctly - otherwise +you will need to edit the `src/jetstream/config.properties` file and set these two variables: + +- `KUBERNETES_SERVICE_HOST` +- `KUBERNETES_SERVICE_PORT` + +The Jetstream backend should be configured. + +> Note: Ensure you set `ENABLE_TECH_PREVIEW=true` to enable the Kubernetes Terminal feature. \ No newline at end of file diff --git a/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html b/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html index 3d71c6e1a8..d5cee755ee 100644 --- a/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html +++ b/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html @@ -1,12 +1,17 @@
diff --git a/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html b/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html
index 3d71c6e1a8..d5cee755ee 100644
--- a/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html
+++ b/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.html
@@ -1,12 +1,17 @@
[Angular template markup lost in extraction. The recoverable changes: the hard-coded error line "Error occurred establishing SSH connection" becomes the interpolation {{ errorMessage || 'Error occurred establishing SSH connection' }}, the "Disconnected" banner is unchanged, and a new block is added that renders the component's {{ message }} field (the window-title text parsed out of the stream).]
diff --git a/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.ts b/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.ts
index b2162e00b7..8d9cedccb8 100644
--- a/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.ts
+++ b/src/frontend/packages/core/src/shared/components/ssh-viewer/ssh-viewer.component.ts
@@ -29,6 +29,8 @@ export class SshViewerComponent implements OnInit, OnDestroy {
   public isConnecting = false;
   private isDestroying = false;
 
+  public message = '';
+
   @ViewChild('terminal', { static: true }) container: ElementRef;
 
   private xterm: Terminal;
@@ -66,7 +68,6 @@ export class SshViewerComponent implements OnInit, OnDestroy {
     this.xterm = new Terminal();
     this.xterm.loadAddon(this.xtermFitAddon);
     this.xterm.open(this.container.nativeElement);
-    // this.xtermFitAddon.fit();
     this.resize();
 
     this.xterm.onKey(e => {
@@ -115,8 +116,11 @@ export class SshViewerComponent implements OnInit, OnDestroy {
     this.msgSubscription = this.sshStream
       .subscribe(
         (data: string) => {
-          for (const c of data.split(' ')) {
-            this.xterm.write(String.fromCharCode(parseInt(c, 16)));
+          // Check for a window title message; otherwise write the data to the terminal
+          if (!this.isWindowTitle(data)) {
+            for (const c of data.split(' ')) {
+              this.xterm.write(String.fromCharCode(parseInt(c, 16)));
+            }
           }
         },
         (err) => {
@@ -130,4 +138,24 @@ export class SshViewerComponent implements OnInit, OnDestroy {
       }
     );
   }
+
+  // Detect the xterm 'set window title' escape sequence (ESC ] 2 ; title BEL).
+  // A title starting with '!' is treated as an error message; anything else updates `message`.
+  private isWindowTitle(data: string): boolean {
+    const chars = data.split(' ');
+    if (chars.length > 4 &&
+      parseInt(chars[0], 16) === 27 &&
+      parseInt(chars[1], 16) === 93 &&
+      parseInt(chars[2], 16) === 50 &&
+      parseInt(chars[3], 16) === 59) {
+      let title = '';
+      for (let i = 4; i < chars.length - 1; i++) {
+        title += String.fromCharCode(parseInt(chars[i], 16));
+      }
+      if (title.length > 0 && title.charAt(0) === '!') {
+        this.errorMessage = title.substr(1);
+        return true;
+      }
+      this.message = title;
+    }
+    return false;
+  }
 }
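The backend signals progress and errors in-band by reusing the xterm "set window title" sequence (`ESC ] 2 ; title BEL`) - see `sendProgressMessage` later in this diff. A standalone Go sketch of the same parse the component performs, useful for checking the framing (not part of the change):

```go
// osc_title.go - mirrors SshViewerComponent.isWindowTitle in Go.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseWindowTitle returns the title carried by an OSC-2 sequence
// ("\x1b]2;<title>\x07"), given the space-separated hex framing used
// on the terminal web socket.
func parseWindowTitle(frame string) (string, bool) {
	toks := strings.Fields(frame)
	if len(toks) < 5 {
		return "", false
	}
	var raw []byte
	for _, t := range toks {
		v, err := strconv.ParseUint(t, 16, 8)
		if err != nil {
			return "", false
		}
		raw = append(raw, byte(v))
	}
	s := string(raw)
	if !strings.HasPrefix(s, "\x1b]2;") || !strings.HasSuffix(s, "\x07") {
		return "", false
	}
	return s[4 : len(s)-1], true
}

func main() {
	// "\x1b]2;Hi\x07" encoded the way pumpStdout encodes frames
	frame := fmt.Sprintf("% x", []byte("\x1b]2;Hi\x07"))
	title, ok := parseWindowTitle(frame)
	fmt.Println(ok, title) // true Hi
}
```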
diff --git a/src/jetstream/main.go b/src/jetstream/main.go
index 9b649da31d..f6f90c60c6 100644
--- a/src/jetstream/main.go
+++ b/src/jetstream/main.go
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"net"
 	"net/http"
 	"os"
@@ -115,6 +116,8 @@ func main() {
 		}
 	}
 
+	rand.Seed(time.Now().UnixNano())
+
 	log.SetOutput(os.Stdout)
 
 	log.Info("========================================")
diff --git a/src/jetstream/plugins/kubernetes/api/api.go b/src/jetstream/plugins/kubernetes/api/api.go
new file mode 100644
index 0000000000..b15fc1b3c3
--- /dev/null
+++ b/src/jetstream/plugins/kubernetes/api/api.go
@@ -0,0 +1,12 @@
+package api
+
+import (
+	"github.com/cloudfoundry-incubator/stratos/src/jetstream/repository/interfaces"
+
+	restclient "k8s.io/client-go/rest"
+)
+
+// Kubernetes abstracts the config helpers the terminal needs from the kubernetes plugin
+type Kubernetes interface {
+	GetConfigForEndpoint(masterURL string, token interfaces.TokenRecord) (*restclient.Config, error)
+	GetKubeConfigForEndpoint(masterURL string, token interfaces.TokenRecord, namespace string) (string, error)
+}
diff --git a/src/jetstream/plugins/kubernetes/go.mod b/src/jetstream/plugins/kubernetes/go.mod
index 4e384f1314..288ec1d075 100644
--- a/src/jetstream/plugins/kubernetes/go.mod
+++ b/src/jetstream/plugins/kubernetes/go.mod
@@ -19,6 +19,7 @@ require (
 	github.com/kubernetes-sigs/aws-iam-authenticator v0.3.0
 	github.com/labstack/echo v3.3.10+incompatible
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
+	github.com/satori/go.uuid v1.2.0 // indirect
 	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 	github.com/sirupsen/logrus v1.4.2
 	github.com/smartystreets/goconvey v1.6.4
diff --git a/src/jetstream/plugins/kubernetes/go.sum b/src/jetstream/plugins/kubernetes/go.sum
index 4af88b969f..d3b2e14ea0 100644
--- a/src/jetstream/plugins/kubernetes/go.sum
+++ b/src/jetstream/plugins/kubernetes/go.sum
@@ -496,6 +496,8 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR
 github.com/russross/blackfriday v2.0.0 h1:L7Oc72h7rDqGkbUorN/ncJ4N/y220/YRezHvBoKLOFA=
 github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday v2.0.0/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
diff --git a/src/jetstream/plugins/kubernetes/main.go b/src/jetstream/plugins/kubernetes/main.go
index 4556391650..8a9bcee37f 100644
--- a/src/jetstream/plugins/kubernetes/main.go
+++ b/src/jetstream/plugins/kubernetes/main.go
@@ -16,12 +16,15 @@ import (
 	log "github.com/sirupsen/logrus"
 
 	"github.com/cloudfoundry-incubator/stratos/src/jetstream/plugins/kubernetes/auth"
+
+	"github.com/cloudfoundry-incubator/stratos/src/jetstream/plugins/kubernetes/terminal"
 )
 
 // KubernetesSpecification is the endpoint that adds Kubernetes support to the backend
 type KubernetesSpecification struct {
 	portalProxy  interfaces.PortalProxy
 	endpointType string
+	kubeTerminal *terminal.KubeTerminal
 }
 
 type KubeStatus struct {
@@ -51,12 +54,20 @@ const (
 	kubeEndpointType    = "k8s"
 	defaultKubeClientID = "K8S_CLIENT"
 
-	// kubeDashboardPluginConfigSetting is the config value sent back to the client to indicate if the kube dashboard can be navigated to
+	// kubeDashboardPluginConfigSetting is the config value sent back to the client to indicate if the kube dashboard is enabled
 	kubeDashboardPluginConfigSetting = "kubeDashboardEnabled"
+	// kubeTerminalPluginConfigSetting is the config value sent back to the client to indicate if the kube terminal is enabled
+	kubeTerminalPluginConfigSetting = "kubeTerminalEnabled"
 )
 
+// Init creates a new instance of the Kubernetes plugin
 func Init(portalProxy interfaces.PortalProxy) (interfaces.StratosPlugin, error) {
-	return &KubernetesSpecification{portalProxy: portalProxy, endpointType: kubeEndpointType}, nil
+	kubeTerminal := terminal.NewKubeTerminal(portalProxy)
+	kube := &KubernetesSpecification{portalProxy: portalProxy, endpointType: kubeEndpointType, kubeTerminal: kubeTerminal}
+	if kubeTerminal != nil {
+		kubeTerminal.Kube = kube
+	}
+	return kube, nil
 }
 
 func (c *KubernetesSpecification) GetEndpointPlugin() (interfaces.EndpointPlugin, error) {
@@ -133,6 +144,14 @@ func (c *KubernetesSpecification) Init() error {
 	// Kube dashboard is enabled by Tech Preview mode
 	c.portalProxy.GetConfig().PluginConfig[kubeDashboardPluginConfigSetting] = strconv.FormatBool(c.portalProxy.GetConfig().EnableTechPreview)
 
+	// Kube terminal is enabled by Tech Preview mode
+	c.portalProxy.GetConfig().PluginConfig[kubeTerminalPluginConfigSetting] = strconv.FormatBool(c.portalProxy.GetConfig().EnableTechPreview)
+
+	// Kick off the cleanup of any old kube terminal pods
+	if c.kubeTerminal != nil {
+		c.kubeTerminal.StartCleanup()
+	}
+
 	return nil
 }
 
@@ -161,6 +180,10 @@ func (c *KubernetesSpecification) AddSessionGroupRoutes(echoGroup *echo.Group) {
 	echoGroup.GET("/helm/releases/:endpoint/:namespace/:name/status", c.GetReleaseStatus)
 	echoGroup.GET("/helm/releases/:endpoint/:namespace/:name", c.GetRelease)
 
+	// Kube Terminal
+	if c.kubeTerminal != nil {
+		echoGroup.GET("/kubeterminal/:guid", c.kubeTerminal.Start)
+	}
 }
 
 func (c *KubernetesSpecification) Info(apiEndpoint string, skipSSLValidation bool) (interfaces.CNSIRecord, interface{}, error) {
diff --git a/src/jetstream/plugins/kubernetes/terminal/cleanup.go b/src/jetstream/plugins/kubernetes/terminal/cleanup.go
new file mode 100644
index 0000000000..621921e1e5
--- /dev/null
+++ b/src/jetstream/plugins/kubernetes/terminal/cleanup.go
@@ -0,0 +1,83 @@
+package terminal
+
+import (
+	"math/rand"
+	"strconv"
+	"time"
+
+	log "github.com/sirupsen/logrus"
+
+	metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Wait time in minutes after the random initial wait
+const waitPeriod = 10
+
+// StartCleanup starts a background routine to clean up orphaned pods
+func (k *KubeTerminal) StartCleanup() {
+	go k.cleanup()
+}
+
+func (k *KubeTerminal) cleanup() {
+	// Use a random initial wait before cleaning up.
+	// If we had more than one backend, this helps to ensure they are not all trying to clean up at the same time.
+	wait := rand.Intn(30)
+	log.Debugf("Kubernetes Terminal cleanup will start in %d minutes", wait)
+
+	for {
+		time.Sleep(time.Duration(wait) * time.Minute)
+		log.Debug("Cleaning up stale Kubernetes Terminal pods and secrets ...")
+
+		// Get all pods with a given label
+		podClient, secretClient, err := k.getClients()
+		if err == nil {
+			// Only want the pods that are kube terminals
+			options := metaV1.ListOptions{}
+			options.LabelSelector = "stratos-role=kube-terminal"
+			pods, err := podClient.List(options)
+			if err == nil {
+				for _, pod := range pods.Items {
+					if sessionID, ok := pod.Annotations[stratosSessionAnnotation]; ok {
+						i, err := strconv.Atoi(sessionID)
+						if err == nil {
+							isValid, err := k.PortalProxy.GetSessionDataStore().IsValidSession(i)
+							if err == nil && !isValid {
+								log.Debugf("Deleting pod %s", pod.Name)
+								podClient.Delete(pod.Name, nil)
+							}
+						}
+					}
+				}
+			} else {
+				log.Debug("Kube Terminal Cleanup: Could not get pods")
+				log.Debug(err)
+			}
+
+			// Only want the secrets that are kube terminals
+			secrets, err := secretClient.List(options)
+			if err == nil {
+				for _, secret := range secrets.Items {
+					if sessionID, ok := secret.Annotations[stratosSessionAnnotation]; ok {
+						i, err := strconv.Atoi(sessionID)
+						if err == nil {
+							isValid, err := k.PortalProxy.GetSessionDataStore().IsValidSession(i)
+							if err == nil && !isValid {
+								log.Debugf("Deleting secret %s", secret.Name)
+								secretClient.Delete(secret.Name, nil)
+							}
+						}
+					}
+				}
+			} else {
+				log.Warn("Kube Terminal Cleanup: Could not get secrets")
+				log.Warn(err)
+			}
+		} else {
+			log.Warn("Kube Terminal Cleanup: Could not get clients")
+			log.Warn(err)
+		}
+
+		wait = waitPeriod
+	}
+}
diff --git a/src/jetstream/plugins/kubernetes/terminal/helpers.go b/src/jetstream/plugins/kubernetes/terminal/helpers.go
new file mode 100644
index 0000000000..b34caf9a9a
--- /dev/null
+++ b/src/jetstream/plugins/kubernetes/terminal/helpers.go
@@ -0,0 +1,266 @@
+package terminal
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/labstack/echo"
+	uuid "github.com/satori/go.uuid"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/cloudfoundry-incubator/stratos/src/jetstream/plugins/kubernetes/auth"
+	"github.com/cloudfoundry-incubator/stratos/src/jetstream/repository/interfaces"
+
+	"github.com/gorilla/websocket"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// PodCreationData stores the clients and names used to create the pod and secret
+type PodCreationData struct {
+	Namespace    string
+	PodClient    corev1.PodInterface
+	SecretClient corev1.SecretInterface
+	PodName      string
+	SecretName   string
+}
+
+func (k *KubeTerminal) getClients() (corev1.PodInterface, corev1.SecretInterface, error) {
+	// Create a token record for Token Auth using the Service Account token
+	token := auth.NewKubeTokenAuthTokenRecord(k.PortalProxy, string(k.Token))
+	config, err := k.Kube.GetConfigForEndpoint(k.APIServer, *token)
+	if err != nil {
+		return nil, nil, errors.New("Can not get Kubernetes config for specified endpoint")
+	}
+
+	kubeClient, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		log.Error("Could not get kube client")
+		return nil, nil, err
+	}
+
+	podClient := kubeClient.CoreV1().Pods(k.Namespace)
+	secretsClient := kubeClient.CoreV1().Secrets(k.Namespace)
+	return podClient, secretsClient, nil
+}
+
+// Create a pod for a user to run the Kube terminal
+func (k *KubeTerminal) createPod(c echo.Context, kubeConfig, kubeVersion string, ws *websocket.Conn) (*PodCreationData, error) {
+	// Unique ID for the secret and pod name
+	id := uuid.NewV4().String()
+	id = strings.ReplaceAll(id, "-", "")
+
+	// Names for the secret and pod
+	secretName := fmt.Sprintf("terminal-%s", id)
+	podName := secretName
+
+	result := &PodCreationData{}
+	result.Namespace = k.Namespace
+
+	podClient, secretClient, err := k.getClients()
+	if err != nil {
+		return result, err
+	}
+
+	// Get the session ID
+	sessionID := ""
+	session, err := k.PortalProxy.GetSession(c)
+	if err == nil {
+		sessionID = session.ID
+	}
+
+	// Create the secret
+	secretSpec := &v1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Secret",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secretName,
+			Namespace: k.Namespace,
+		},
+		Type: "Opaque",
+	}
+
+	setResourceMetadata(&secretSpec.ObjectMeta, sessionID)
+
+	secretSpec.Data = make(map[string][]byte)
+	secretSpec.Data["kubeconfig"] = []byte(kubeConfig)
+
+	// Get the Helm repository setup script if we have Helm repositories
+	helmSetup := getHelmRepoSetupScript(k.PortalProxy)
+	if len(helmSetup) > 0 {
+		secretSpec.Data["helm-setup"] = []byte(helmSetup)
+	}
+
+	_, err = secretClient.Create(secretSpec)
+	if err != nil {
+		log.Warnf("Kubernetes Terminal: Unable to create Secret: %+v", err)
+		return result, err
+	}
+
+	result.SecretClient = secretClient
+	result.SecretName = secretName
+
+	// Pod
+	podSpec := &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      podName,
+			Namespace: k.Namespace,
+		},
+	}
+
+	// Label the pod, so we can find it as a kube terminal pod
+	setResourceMetadata(&podSpec.ObjectMeta, sessionID)
+
+	// Don't mount a service account token
+	off := false
+	podSpec.Spec.AutomountServiceAccountToken = &off
+	podSpec.Spec.EnableServiceLinks = &off
+	podSpec.Spec.RestartPolicy = "Never"
+	podSpec.Spec.DNSPolicy = "Default"
+
+	volumeMountsSpec := make([]v1.VolumeMount, 1)
+	volumeMountsSpec[0].Name = "kubeconfig"
+	volumeMountsSpec[0].MountPath = "/home/stratos/.stratos"
+	volumeMountsSpec[0].ReadOnly = true
+
+	containerSpec := make([]v1.Container, 1)
+	containerSpec[0].Name = consoleContainerName
+	containerSpec[0].Image = k.Image
+	containerSpec[0].ImagePullPolicy = "Always"
+	containerSpec[0].VolumeMounts = volumeMountsSpec
+
+	// Add an env var for the kube version
+	containerSpec[0].Env = make([]v1.EnvVar, 1)
+	containerSpec[0].Env[0].Name = "K8S_VERSION"
+	containerSpec[0].Env[0].Value = kubeVersion
+
+	podSpec.Spec.Containers = containerSpec
+
+	volumesSpec := make([]v1.Volume, 1)
+	volumesSpec[0].Name = "kubeconfig"
+	volumesSpec[0].Secret = &v1.SecretVolumeSource{
+		SecretName: secretName,
+	}
+	podSpec.Spec.Volumes = volumesSpec
+
+	// Create a new pod
+	pod, err := podClient.Create(podSpec)
+	if err != nil {
+		log.Warnf("Kubernetes Terminal: Unable to create Pod: %+v", err)
+		// Secret will get cleaned up by caller
+		return result, err
+	}
+
+	result.PodClient = podClient
+	result.PodName = podName
+
+	sendProgressMessage(ws, "Waiting for Kubernetes Terminal to start up ...")
+
+	// Wait for the pod to be running
+	timeout := 60
+	statusOptions := metav1.GetOptions{}
+	for {
+		status, statusErr := podClient.Get(pod.Name, statusOptions)
+		if statusErr == nil && status.Status.Phase == "Running" {
+			break
+		}
+
+		timeout = timeout - 1
+		if timeout == 0 {
+			err = errors.New("Timed out waiting for pod to enter ready state")
+			break
+		}
+
+		// Sleep
+		time.Sleep(1500 * time.Millisecond)
+	}
+
+	return result, err
+}
+
+func setResourceMetadata(metadata *metav1.ObjectMeta, sessionID string) {
+	// Label the kubernetes resource, so we can find it as a kube terminal resource
+	metadata.Labels = make(map[string]string)
+	metadata.Labels[stratosRoleLabel] = stratosKubeTerminalRole
+
+	// Annotate with the session ID, so the resource can be cleaned up when the session expires
+	metadata.Annotations = make(map[string]string)
+	if len(sessionID) > 0 {
+		metadata.Annotations[stratosSessionAnnotation] = sessionID
+	}
+}
+
+// Cleanup the pod and secret
+func (k *KubeTerminal) cleanupPodAndSecret(podData *PodCreationData) error {
+	if podData == nil {
+		return nil
+	}
+
+	if len(podData.PodName) > 0 {
+		//captureBashHistory(podData)
+		podData.PodClient.Delete(podData.PodName, nil)
+	}
+
+	if len(podData.SecretName) > 0 {
+		podData.SecretClient.Delete(podData.SecretName, nil)
+	}
+
+	return nil
+}
+
+func getHelmRepoSetupScript(portalProxy interfaces.PortalProxy) string {
+	str := ""
+
+	// Get all of the helm endpoints
+	endpoints, err := portalProxy.ListEndpoints()
+	if err != nil {
+		log.Error("Can not list Helm Repository endpoints")
+		return str
+	}
+
+	for _, ep := range endpoints {
+		if ep.CNSIType == "helm" {
+			str += fmt.Sprintf("helm repo add %s %s > /dev/null\n", ep.Name, ep.APIEndpoint)
+		}
+	}
+
+	return str
+}
+
+// sendProgressMessage sends an in-band message using the xterm 'set window title'
+// escape sequence, hex-encoded in the same way as the terminal stream
+func sendProgressMessage(ws *websocket.Conn, progressMsg string) {
+	msg := fmt.Sprintf("\033]2;%s\007", progressMsg)
+	bytes := fmt.Sprintf("% x\n", []byte(msg))
+	if err := ws.WriteMessage(websocket.TextMessage, []byte(bytes)); err != nil {
+		log.Error("Could not send progress message to client")
+	}
+}
return "", errors.New("Could not unmarshal node list") + } + + if len(nodes.Items) > 0 { + // Get the version number - remove any 'v' perfix or '+' suffix + version := nodes.Items[0].Status.NodeInfo.KubeletVersion + reg, err := regexp.Compile("[^0-9\\.]+") + if err == nil { + version = reg.ReplaceAllString(version, "") + } + parts := strings.Split(version, ".") + if len(parts) > 1 { + v := fmt.Sprintf("%s.%s", parts[0], parts[1]) + return v, nil + } + } + + return "", errors.New("Can not get Kubernetes version") +} diff --git a/src/jetstream/plugins/kubernetes/terminal/start.go b/src/jetstream/plugins/kubernetes/terminal/start.go new file mode 100644 index 0000000000..63c90fe519 --- /dev/null +++ b/src/jetstream/plugins/kubernetes/terminal/start.go @@ -0,0 +1,204 @@ +package terminal + +import ( + "crypto/tls" + "errors" + "fmt" + + //"encoding/base64" + "encoding/json" + "net/http" + "strings" + "time" + + "github.com/labstack/echo" + log "github.com/sirupsen/logrus" + + "github.com/cloudfoundry-incubator/stratos/src/jetstream/repository/interfaces" + + "github.com/gorilla/websocket" +) + +// TTY Resize, see: https://gitlab.cncf.ci/kubernetes/kubernetes/commit/3b21a9901bcd48bb452d3bf1a0cddc90dae142c4#9691a2f9b9c30711f0397221db0b9ac55ab0e2d1 + +// Allow connections from any Origin +var upgrader = websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { return true }, +} + +// KeyCode - JSON object that is passed from the front-end to notify of a key press or a term resize +type KeyCode struct { + Key string `json:"key"` + Cols int `json:"cols"` + Rows int `json:"rows"` +} + +type terminalSize struct { + Width uint16 + Height uint16 +} + +const ( + // Time allowed to write a message to the peer. + writeWait = 10 * time.Second +) + +// Start handles web-socket request to launch a Kubernetes Terminal +func (k *KubeTerminal) Start(c echo.Context) error { + log.Debug("Kube Terminal start request") + + endpointGUID := c.Param("guid") + userGUID := c.Get("user_id").(string) + + cnsiRecord, err := k.PortalProxy.GetCNSIRecord(endpointGUID) + if err != nil { + return errors.New("Could not get endpoint information") + } + + // Get token for this user + tokenRecord, ok := k.PortalProxy.GetCNSITokenRecord(endpointGUID, userGUID) + if !ok { + return errors.New("Could not get token") + } + + // This is the kube config for the kubernetes endpoint that we want configured in the Terminal + kubeConfig, err := k.Kube.GetKubeConfigForEndpoint(cnsiRecord.APIEndpoint.String(), tokenRecord, "") + if err != nil { + return errors.New("Can not get Kubernetes config for specified endpoint") + } + + // Determine the Kubernetes version + version, _ := k.getKubeVersion(endpointGUID, userGUID) + log.Debugf("Kubernetes Version: %s", version) + + // Upgrade the web socket for the incoming request + ws, pingTicker, err := interfaces.UpgradeToWebSocket(c) + if err != nil { + return err + } + defer ws.Close() + defer pingTicker.Stop() + + // We are now in web socket land - we don't want any middleware to change the HTTP response + c.Set("Stratos-WebSocket", "true") + + // Send a message to say that we are creating the pod + sendProgressMessage(ws, "Launching Kubernetes Terminal ... one moment please") + + podData, err := k.createPod(c, kubeConfig, version, ws) + + // Clear progress message + sendProgressMessage(ws, "") + + if err != nil { + log.Errorf("Kubernetes Terminal: Error creating secret or pod: %+v", err) + k.cleanupPodAndSecret(podData) + + // Send error message + sendProgressMessage(ws, "!" 
diff --git a/src/jetstream/plugins/kubernetes/terminal/start.go b/src/jetstream/plugins/kubernetes/terminal/start.go
new file mode 100644
index 0000000000..63c90fe519
--- /dev/null
+++ b/src/jetstream/plugins/kubernetes/terminal/start.go
@@ -0,0 +1,204 @@
+package terminal
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/labstack/echo"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/cloudfoundry-incubator/stratos/src/jetstream/repository/interfaces"
+
+	"github.com/gorilla/websocket"
+)
+
+// TTY Resize, see: https://gitlab.cncf.ci/kubernetes/kubernetes/commit/3b21a9901bcd48bb452d3bf1a0cddc90dae142c4#9691a2f9b9c30711f0397221db0b9ac55ab0e2d1
+
+// Allow connections from any Origin
+var upgrader = websocket.Upgrader{
+	CheckOrigin: func(r *http.Request) bool { return true },
+}
+
+// KeyCode - JSON object that is passed from the front-end to notify of a key press or a term resize
+type KeyCode struct {
+	Key  string `json:"key"`
+	Cols int    `json:"cols"`
+	Rows int    `json:"rows"`
+}
+
+type terminalSize struct {
+	Width  uint16
+	Height uint16
+}
+
+const (
+	// Time allowed to write a message to the peer.
+	writeWait = 10 * time.Second
+)
+
+// Start handles a web-socket request to launch a Kubernetes Terminal
+func (k *KubeTerminal) Start(c echo.Context) error {
+	log.Debug("Kube Terminal start request")
+
+	endpointGUID := c.Param("guid")
+	userGUID := c.Get("user_id").(string)
+
+	cnsiRecord, err := k.PortalProxy.GetCNSIRecord(endpointGUID)
+	if err != nil {
+		return errors.New("Could not get endpoint information")
+	}
+
+	// Get token for this user
+	tokenRecord, ok := k.PortalProxy.GetCNSITokenRecord(endpointGUID, userGUID)
+	if !ok {
+		return errors.New("Could not get token")
+	}
+
+	// This is the kube config for the kubernetes endpoint that we want configured in the Terminal
+	kubeConfig, err := k.Kube.GetKubeConfigForEndpoint(cnsiRecord.APIEndpoint.String(), tokenRecord, "")
+	if err != nil {
+		return errors.New("Can not get Kubernetes config for specified endpoint")
+	}
+
+	// Determine the Kubernetes version
+	version, _ := k.getKubeVersion(endpointGUID, userGUID)
+	log.Debugf("Kubernetes Version: %s", version)
+
+	// Upgrade the web socket for the incoming request
+	ws, pingTicker, err := interfaces.UpgradeToWebSocket(c)
+	if err != nil {
+		return err
+	}
+	defer ws.Close()
+	defer pingTicker.Stop()
+
+	// We are now in web socket land - we don't want any middleware to change the HTTP response
+	c.Set("Stratos-WebSocket", "true")
+
+	// Send a message to say that we are creating the pod
+	sendProgressMessage(ws, "Launching Kubernetes Terminal ... one moment please")
+
+	podData, err := k.createPod(c, kubeConfig, version, ws)
+
+	// Clear the progress message
+	sendProgressMessage(ws, "")
+
+	if err != nil {
+		log.Errorf("Kubernetes Terminal: Error creating secret or pod: %+v", err)
+		k.cleanupPodAndSecret(podData)
+
+		// Send the error message (a title starting with '!' is shown as an error by the client)
+		sendProgressMessage(ws, "!"+err.Error())
+		return err
+	}
+
+	// API Endpoint to SSH/exec into a container
+	target := fmt.Sprintf("%s/api/v1/namespaces/%s/pods/%s/exec?command=/bin/bash&stdin=true&stderr=true&stdout=true&tty=true", k.APIServer, k.Namespace, podData.PodName)
+
+	dialer := &websocket.Dialer{
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: true,
+		},
+	}
+
+	if strings.HasPrefix(target, "https://") {
+		target = "wss://" + target[8:]
+	} else {
+		target = "ws://" + target[7:]
+	}
+
+	header := &http.Header{}
+	header.Add("Authorization", fmt.Sprintf("Bearer %s", string(k.Token)))
+	wsConn, _, err := dialer.Dial(target, *header)
+
+	if err == nil {
+		defer wsConn.Close()
+	}
+
+	if err != nil {
+		k.cleanupPodAndSecret(podData)
+		log.Warn("Kube Terminal: Could not connect to pod")
+		// No point returning an error - we've already upgraded to web sockets, so we can't use the HTTP response now
+		return nil
+	}
+
+	stdoutDone := make(chan bool, 1)
+	go pumpStdout(ws, wsConn, stdoutDone)
+
+	// If the downstream connection is closed, close the other web socket as well
+	ws.SetCloseHandler(func(code int, text string) error {
+		wsConn.Close()
+		return nil
+	})
+
+	// Read the input from the web socket and pipe it to the SSH client
+	for {
+		_, r, err := ws.ReadMessage()
+		if err != nil {
+			// Check to see if this was because the web socket was closed cleanly
+			// (don't wait forever - the read may have failed for another reason)
+			closed := false
+			select {
+			case msg := <-stdoutDone:
+				closed = msg
+			case <-time.After(time.Second):
+			}
+			if !closed {
+				log.Errorf("Kubernetes terminal: error reading message from web socket: %+v", err)
+			}
+			log.Debug("Kube Terminal cleaning up ....")
+			// No point returning an error - we've already upgraded to web sockets, so we can't use the HTTP response now
+			return k.cleanupPodAndSecret(podData)
+		}
+
+		res := KeyCode{}
+		json.Unmarshal(r, &res)
+
+		if res.Cols == 0 {
+			// Key press: channel 0 is stdin on the exec web socket
+			slice := make([]byte, 1)
+			slice[0] = 0
+			slice = append(slice, []byte(res.Key)...)
+			wsConn.WriteMessage(websocket.TextMessage, slice)
+		} else {
+			// Terminal resize: channel 4 carries a JSON-encoded size message
+			size := terminalSize{
+				Width:  uint16(res.Cols),
+				Height: uint16(res.Rows),
+			}
+			j, _ := json.Marshal(size)
+			resizeStream := []byte{4}
+			slice := append(resizeStream, j...)
+			wsConn.WriteMessage(websocket.TextMessage, slice)
+		}
+	}
+}
+
+func pumpStdout(ws *websocket.Conn, source *websocket.Conn, done chan bool) {
+	for {
+		_, r, err := source.ReadMessage()
+		if err != nil {
+			// Close
+			ws.Close()
+			done <- true
+			break
+		}
+		ws.SetWriteDeadline(time.Now().Add(writeWait))
+		// Strip the leading channel byte; forward as space-separated hex
+		bytes := fmt.Sprintf("% x\n", r[1:])
+		if err := ws.WriteMessage(websocket.TextMessage, []byte(bytes)); err != nil {
+			log.Errorf("Kubernetes Terminal failed to write message: %+v", err)
+			ws.Close()
+			break
+		}
+	}
+}
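The exec web socket multiplexes streams by prefixing each message with a channel byte (0 = stdin, 4 = resize, per the upstream remotecommand protocol referenced at the top of start.go); the resize payload is the JSON-encoded `terminalSize`. A tiny illustration of the two message shapes `Start` writes:

```go
// framing_demo.go - illustrates the channel framing used on the exec socket.
package main

import (
	"encoding/json"
	"fmt"
)

type terminalSize struct {
	Width  uint16
	Height uint16
}

func stdinMessage(key string) []byte {
	return append([]byte{0}, []byte(key)...) // channel 0: stdin
}

func resizeMessage(cols, rows int) []byte {
	j, _ := json.Marshal(terminalSize{Width: uint16(cols), Height: uint16(rows)})
	return append([]byte{4}, j...) // channel 4: resize
}

func main() {
	fmt.Printf("stdin:  % x\n", stdinMessage("ls\r"))
	fmt.Printf("resize: %s\n", resizeMessage(120, 40)[1:])
	// resize payload: {"Width":120,"Height":40}
}
```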
%v", err) + return nil + } + token = []byte(tkn) + } + + kt.Token = token + + log.Debug("Kubernetes Terminal configured") + return kt +} diff --git a/src/jetstream/repository/interfaces/sessiondata.go b/src/jetstream/repository/interfaces/sessiondata.go index 4128f4280f..37884b576a 100644 --- a/src/jetstream/repository/interfaces/sessiondata.go +++ b/src/jetstream/repository/interfaces/sessiondata.go @@ -10,9 +10,13 @@ type SessionDataStore interface { SetValues(session, group string, values map[string]string, autoExpire bool) error DeleteValues(session, group string) error + IsValidSession(id int) (bool, error) + // Cleanup runs a background goroutine every interval that deletes expired sessions from the database Cleanup(interval time.Duration) (chan<- struct{}, <-chan struct{}) // StopCleanup stops the background cleanup from running StopCleanup(quit chan<- struct{}, done <-chan struct{}) + + } diff --git a/src/jetstream/repository/sessiondata/psql_sessiondata.go b/src/jetstream/repository/sessiondata/psql_sessiondata.go index d0c3e7c1a0..c60ad32b63 100644 --- a/src/jetstream/repository/sessiondata/psql_sessiondata.go +++ b/src/jetstream/repository/sessiondata/psql_sessiondata.go @@ -3,6 +3,8 @@ package sessiondata import ( "database/sql" "fmt" + "strconv" + "time" log "github.com/sirupsen/logrus" @@ -16,12 +18,15 @@ var insertSessionDataValue = `INSERT INTO session_data (session, groupName, name var deleteSessionGroupData = `DELETE FROM session_data WHERE session=$1 AND groupName=$2` -// Expire data for sessions that not longer exist +// Expire data for sessions that no longer exist var expireSessionData = `UPDATE session_data SET expired=true WHERE session NOT IN (SELECT id from sessions)` // Delete data for sessions that no longer exist var deleteSessionData = `DELETE FROM session_data WHERE expired=true AND keep_on_expire=false` +// Check if a session valid +var isValidSession = `SELECT id, expires_on from sessions WHERE id=$1` + // SessionDataRepository is a RDB-backed Session Data repository type SessionDataRepository struct { db *sql.DB @@ -40,6 +45,7 @@ func InitRepositoryProvider(databaseProvider string) { deleteSessionGroupData = datastore.ModifySQLStatement(deleteSessionGroupData, databaseProvider) expireSessionData = datastore.ModifySQLStatement(expireSessionData, databaseProvider) deleteSessionData = datastore.ModifySQLStatement(deleteSessionData, databaseProvider) + isValidSession = datastore.ModifySQLStatement(isValidSession, databaseProvider) } // GetValues returns all values from the config table as a map @@ -100,3 +106,27 @@ func (c *SessionDataRepository) SetValues(session, group string, values map[stri return nil } + +// IsValidSession - Determines if the given session ID is still valid (has not expired) +func (c *SessionDataRepository) IsValidSession(session int) (bool, error) { + var ( + id string + expiry time.Time + ) + + err := c.db.QueryRow(isValidSession, strconv.Itoa(session)).Scan(&id, &expiry) + + switch { + case err == sql.ErrNoRows: + // No record with this ID - session does not exist + return false, nil + case err != nil: + return false, fmt.Errorf("Error trying to find Session record: %v", err) + default: + // do nothing + } + + // Check if the session has expired + now := time.Now() + return expiry.After(now), nil +}