Updates for gcloud 0.9.54 #6270

Merged
merged 1 commit on Apr 2, 2015
13 changes: 7 additions & 6 deletions cluster/gke/util.sh
@@ -91,6 +91,7 @@ function verify-prereqs() {
gcloud_prompt="-q"
fi
gcloud ${gcloud_prompt:-} components update preview || true

Contributor: Remove the update preview line?

Contributor (author): We still need the preview component for instance-groups and managed-instance-groups, which were not moved to alpha.
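
As an aside, the cluster scripts still drive node instance groups through that preview surface, so the component has to stay installed even though "container" moved to alpha. A rough illustration follows; the exact subcommand and flag spelling for gcloud 0.9.54 is an assumption, not taken from this PR:

    # Hypothetical example of the preview surface the scripts still depend on.
    gcloud preview managed-instance-groups --zone "${ZONE}" list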

gcloud ${gcloud_prompt:-} components update alpha || true
gcloud ${gcloud_prompt:-} components update || true
}

@@ -128,7 +129,7 @@ function kube-up() {
fi

# Bring up the cluster.
"${GCLOUD}" preview container clusters create "${CLUSTER_NAME}" \
"${GCLOUD}" alpha container clusters create "${CLUSTER_NAME}" \
--zone="${ZONE}" \
--project="${PROJECT}" \
--cluster-api-version="${CLUSTER_API_VERSION:-}" \
@@ -175,10 +176,10 @@ function test-setup() {
function get-password() {
echo "... in get-password()" >&2
detect-project >&2
KUBE_USER=$("${GCLOUD}" preview container clusters describe \
KUBE_USER=$("${GCLOUD}" alpha container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep user | cut -f 4 -d ' ')
KUBE_PASSWORD=$("${GCLOUD}" preview container clusters describe \
KUBE_PASSWORD=$("${GCLOUD}" alpha container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep password | cut -f 4 -d ' ')
}
@@ -195,7 +196,7 @@ function detect-master() {
echo "... in detect-master()" >&2
detect-project >&2
KUBE_MASTER="k8s-${CLUSTER_NAME}-master"
KUBE_MASTER_IP=$("${GCLOUD}" preview container clusters describe \
KUBE_MASTER_IP=$("${GCLOUD}" alpha container clusters describe \
--project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
| grep endpoint | cut -f 2 -d ' ')
}
@@ -218,7 +219,7 @@ function detect-minions() {
function detect-minion-names {
detect-project
export MINION_NAMES=""
count=$("${GCLOUD}" preview container clusters describe --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" | grep numNodes | cut -f 2 -d ' ')
count=$("${GCLOUD}" alpha container clusters describe --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" | grep numNodes | cut -f 2 -d ' ')
for x in $(seq 1 $count); do
export MINION_NAMES="${MINION_NAMES} k8s-${CLUSTER_NAME}-node-${x} ";
done
@@ -286,6 +287,6 @@ function test-teardown() {
function kube-down() {
echo "... in kube-down()" >&2
detect-project >&2
"${GCLOUD}" preview container clusters delete --project="${PROJECT}" \
"${GCLOUD}" alpha container clusters delete --project="${PROJECT}" \
--zone="${ZONE}" "${CLUSTER_NAME}"
}
2 changes: 1 addition & 1 deletion cluster/kubectl.sh
@@ -108,7 +108,7 @@ if [[ "$KUBERNETES_PROVIDER" == "gke" ]]; then
kubectl="${GCLOUD}"
# GKE runs kubectl through gcloud.
config=(
"preview"
"alpha"
"container"
"kubectl"
"--project=${PROJECT}"
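
For illustration, with "preview" swapped for "alpha" the GKE wrapper ends up invoking kubectl roughly as follows. Only "alpha container kubectl" and --project appear in this hunk; the remaining flags are assumptions added for the example:

    # Approximate expansion of the config array for a simple kubectl call.
    # --zone/--cluster are guesses; the rest of the array is outside this hunk.
    "${GCLOUD}" alpha container kubectl \
      --project="${PROJECT}" --zone="${ZONE}" --cluster="${CLUSTER_NAME}" \
      get pods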
40 changes: 20 additions & 20 deletions cmd/e2e/e2e.go
@@ -28,40 +28,40 @@ import (
)

var (
kubeConfig = flag.String(clientcmd.RecommendedConfigPathFlag, "", "Path to kubeconfig containing embedded authinfo. Will use cluster/user info from 'current-context'")
authConfig = flag.String("auth_config", "", "Path to the auth info file.")
certDir = flag.String("cert_dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
gceProject = flag.String("gce_project", "", "The GCE project being used, if applicable")
gceZone = flag.String("gce_zone", "", "GCE zone being used, if applicable")
host = flag.String("host", "", "The host to connect to")
masterName = flag.String("kube_master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
provider = flag.String("provider", "", "The name of the Kubernetes provider")
orderseed = flag.Int64("orderseed", 0, "If non-zero, seed of random test shuffle order. (Otherwise random.)")
repoRoot = flag.String("repo_root", "./", "Root directory of kubernetes repository, for finding test files. Default assumes working directory is repository root")
reportDir = flag.String("report_dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
times = flag.Int("times", 1, "Number of times each test is eligible to be run. Individual order is determined by shuffling --times instances of each test using --orderseed (like a multi-deck shoe of cards).")
testList util.StringList
context = &e2e.TestContextType{}
gceConfig = &context.GCEConfig

orderseed = flag.Int64("orderseed", 0, "If non-zero, seed of random test shuffle order. (Otherwise random.)")
reportDir = flag.String("report_dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
times = flag.Int("times", 1, "Number of times each test is eligible to be run. Individual order is determined by shuffling --times instances of each test using --orderseed (like a multi-deck shoe of cards).")
testList util.StringList
)

func init() {
flag.VarP(&testList, "test", "t", "Test to execute (may be repeated or comma separated list of tests.) Defaults to running all tests.")

flag.StringVar(&context.KubeConfig, clientcmd.RecommendedConfigPathFlag, "", "Path to kubeconfig containing embedded authinfo.")
flag.StringVar(&context.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
flag.StringVar(&context.AuthConfig, "auth_config", "", "Path to the auth info file.")
flag.StringVar(&context.CertDir, "cert_dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flag.StringVar(&context.Host, "host", "", "The host to connect to")
flag.StringVar(&context.RepoRoot, "repo_root", "./", "Root directory of kubernetes repository, for finding test files. Default assumes working directory is repository root")
flag.StringVar(&context.Provider, "provider", "", "The name of the Kubernetes provider")
flag.StringVar(&gceConfig.MasterName, "kube_master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
flag.StringVar(&gceConfig.ProjectID, "gce_project", "", "The GCE project being used, if applicable")
flag.StringVar(&gceConfig.Zone, "gce_zone", "", "GCE zone being used, if applicable")
}

func main() {
util.InitFlags()
goruntime.GOMAXPROCS(goruntime.NumCPU())
if *provider == "" {
if context.Provider == "" {
glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
os.Exit(1)
}
if *times <= 0 {
glog.Error("Invalid --times (negative or no testing requested)!")
os.Exit(1)
}
gceConfig := &e2e.GCEConfig{
ProjectID: *gceProject,
Zone: *gceZone,
MasterName: *masterName,
}
e2e.RunE2ETests(*kubeConfig, *authConfig, *certDir, *host, *repoRoot, *provider, gceConfig, *orderseed, *times, *reportDir, testList)
e2e.RunE2ETests(context, *orderseed, *times, *reportDir, testList)
}
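
For reference, the TestContextType and GCEConfig consumed above presumably carry the fields registered in init(). A minimal sketch, with field names inferred from this diff rather than copied from the actual test/e2e sources:

    // Sketch of the shared e2e context (inferred from the flag registrations
    // in this PR; the real definitions live in test/e2e and may differ).
    package e2e

    type GCEConfig struct {
        ProjectID  string // --gce_project
        Zone       string // --gce_zone
        MasterName string // --kube_master
    }

    type TestContextType struct {
        KubeConfig  string // --kubeconfig
        KubeContext string // --context
        AuthConfig  string // --auth_config
        CertDir     string // --cert_dir
        Host        string // --host
        RepoRoot    string // --repo_root
        Provider    string // --provider
        GCEConfig   GCEConfig
    }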
8 changes: 4 additions & 4 deletions hack/ginkgo-e2e.sh
@@ -95,12 +95,12 @@ if [[ -z "${AUTH_CONFIG:-}" ]]; then
"--kubeconfig=${HOME}/.kubernetes_vagrant_kubeconfig"
)
elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
# With GKE, our auth and certs are in gcloud's config directory.
# GKE stores its own kubeconfig in gcloud's config directory.
detect-project &> /dev/null
cfg_dir="${GCLOUD_CONFIG_DIR}/${PROJECT}.${ZONE}.${CLUSTER_NAME}"
auth_config=(
"--auth_config=${cfg_dir}/kubernetes_auth"
"--cert_dir=${cfg_dir}"
"--kubeconfig=${GCLOUD_CONFIG_DIR}/kubeconfig"
# gcloud doesn't set the current-context, so we have to set it
"--context=gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}"
)
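
In other words, the runner now reads gcloud's own kubeconfig and is told explicitly which context to use, since gcloud writes the entry but leaves current-context unset. A hand-run equivalent might look like this (the binary path is an assumption for illustration):

    # Hypothetical manual invocation mirroring the GKE branch above.
    _output/local/bin/e2e \
      --kubeconfig="${GCLOUD_CONFIG_DIR}/kubeconfig" \
      --context="gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" \
      --provider=gke
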
elif [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
auth_config=(
8 changes: 4 additions & 4 deletions test/e2e/certs.go
@@ -32,14 +32,14 @@ var _ = Describe("MasterCerts", func() {
})

It("should have all expected certs on the master", func() {
if testContext.provider != "gce" && testContext.provider != "gke" {
By(fmt.Sprintf("Skipping MasterCerts test for cloud provider %s (only supported for gce and gke)", testContext.provider))
if testContext.Provider != "gce" && testContext.Provider != "gke" {
By(fmt.Sprintf("Skipping MasterCerts test for cloud provider %s (only supported for gce and gke)", testContext.Provider))
return
}

for _, certFile := range []string{"kubecfg.key", "kubecfg.crt", "ca.crt"} {
cmd := exec.Command("gcloud", "compute", "ssh", "--project", testContext.gceConfig.ProjectID,
"--zone", testContext.gceConfig.Zone, testContext.gceConfig.MasterName,
cmd := exec.Command("gcloud", "compute", "ssh", "--project", testContext.GCEConfig.ProjectID,
"--zone", testContext.GCEConfig.Zone, testContext.GCEConfig.MasterName,
"--command", fmt.Sprintf("ls /srv/kubernetes/%s", certFile))
if _, err := cmd.CombinedOutput(); err != nil {
Fail(fmt.Sprintf("Error checking for cert file %s on master: %v", certFile, err))
4 changes: 2 additions & 2 deletions test/e2e/driver.go
@@ -53,8 +53,8 @@ func (t *testResult) Fail() { *t = false }

// Run each Go end-to-end-test. This function assumes the
// creation of a test cluster.
func RunE2ETests(kubeConfig, authConfig, certDir, host, repoRoot, provider string, gceConfig *GCEConfig, orderseed int64, times int, reportDir string, testList []string) {
testContext = testContextType{kubeConfig, authConfig, certDir, host, repoRoot, provider, *gceConfig}
func RunE2ETests(context *TestContextType, orderseed int64, times int, reportDir string, testList []string) {
testContext = *context
util.ReallyCrash = true
util.InitLogs()
defer util.FlushLogs()
4 changes: 2 additions & 2 deletions test/e2e/es_cluster_logging.go
@@ -60,8 +60,8 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
// TODO: For now assume we are only testing cluster logging with Elasticsearch
// on GCE. Once we are sure that Elasticsearch cluster level logging
// works for other providers we should widen the scope of this test.
if testContext.provider != "gce" {
Logf("Skipping cluster level logging test for provider %s", testContext.provider)
if testContext.Provider != "gce" {
Logf("Skipping cluster level logging test for provider %s", testContext.Provider)
return
}

2 changes: 1 addition & 1 deletion test/e2e/events.go
@@ -41,7 +41,7 @@ var _ = Describe("Events", func() {
})

It("should be sent by kubelets and the scheduler about pods scheduling and running", func() {
provider := testContext.provider
provider := testContext.Provider
if len(provider) > 0 && provider != "gce" && provider != "gke" && provider != "aws" {
By(fmt.Sprintf("skipping TestKubeletSendsEvent on cloud provider %s", provider))
return
4 changes: 2 additions & 2 deletions test/e2e/kubectl.go
@@ -53,7 +53,7 @@ var _ = Describe("kubectl", func() {

Describe("update-demo", func() {
var (
updateDemoRoot = filepath.Join(testContext.repoRoot, "examples/update-demo/v1beta1")
updateDemoRoot = filepath.Join(testContext.RepoRoot, "examples/update-demo/v1beta1")
nautilusPath = filepath.Join(updateDemoRoot, "nautilus-rc.yaml")
kittenPath = filepath.Join(updateDemoRoot, "kitten-rc.yaml")
)
@@ -94,7 +94,7 @@ var _ = Describe("kubectl", func() {
})

Describe("guestbook", func() {
var guestbookPath = filepath.Join(testContext.repoRoot, "examples/guestbook")
var guestbookPath = filepath.Join(testContext.RepoRoot, "examples/guestbook")

It("should create and stop a working application", func() {
defer cleanup(guestbookPath, frontendSelector, redisMasterSelector, redisSlaveSelector)
6 changes: 3 additions & 3 deletions test/e2e/monitoring.go
@@ -40,9 +40,9 @@ var _ = Describe("Monitoring", func() {
})

It("verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() {
if testContext.provider != "gce" {
if testContext.Provider != "gce" {
By(fmt.Sprintf("Skipping Monitoring test, which is only supported for provider gce (not %s)",
testContext.provider))
testContext.Provider))
return
}
testMonitoringUsingHeapsterInfluxdb(c)
@@ -192,7 +192,7 @@ func validatePodsAndNodes(influxdbClient *influxdb.Client, expectedPods, expecte
}

func getMasterHost() string {
masterUrl, err := url.Parse(testContext.host)
masterUrl, err := url.Parse(testContext.Host)
expectNoError(err)
return masterUrl.Host
}
2 changes: 1 addition & 1 deletion test/e2e/networking.go
@@ -42,7 +42,7 @@ var _ = Describe("Networking", func() {
ns := "nettest-" + randomSuffix()

It("should function for pods", func() {
if testContext.provider == "vagrant" {
if testContext.Provider == "vagrant" {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}
28 changes: 14 additions & 14 deletions test/e2e/pd.go
@@ -55,27 +55,27 @@ var _ = Describe("PD", func() {
})

It("should schedule a pod w/ a RW PD, remove it, then schedule it on another host", func() {
if testContext.provider != "gce" {
if testContext.Provider != "gce" {
By(fmt.Sprintf("Skipping PD test, which is only supported for provider gce (not %s)",
testContext.provider))
testContext.Provider))
return
}

host0Pod := testPDPod(diskName, host0Name, false)
host1Pod := testPDPod(diskName, host1Name, false)

By(fmt.Sprintf("creating PD %q", diskName))
expectNoError(createPD(diskName, testContext.gceConfig.Zone), "Error creating PD")
expectNoError(createPD(diskName, testContext.GCEConfig.Zone), "Error creating PD")

defer func() {
By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
podClient.Delete(host0Pod.Name)
podClient.Delete(host1Pod.Name)
detachPD(host0Name, diskName, testContext.gceConfig.Zone)
detachPD(host1Name, diskName, testContext.gceConfig.Zone)
deletePD(diskName, testContext.gceConfig.Zone)
detachPD(host0Name, diskName, testContext.GCEConfig.Zone)
detachPD(host1Name, diskName, testContext.GCEConfig.Zone)
deletePD(diskName, testContext.GCEConfig.Zone)
}()

By("submitting host0Pod to kubernetes")
@@ -98,7 +98,7 @@ var _ = Describe("PD", func() {

By(fmt.Sprintf("deleting PD %q", diskName))
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
if err = deletePD(diskName, testContext.gceConfig.Zone); err != nil {
if err = deletePD(diskName, testContext.GCEConfig.Zone); err != nil {
Logf("Couldn't delete PD. Sleeping 5 seconds")
continue
}
@@ -110,9 +110,9 @@
})

It("should schedule a pod w/ a readonly PD on two hosts, then remove both.", func() {
if testContext.provider != "gce" {
if testContext.Provider != "gce" {
By(fmt.Sprintf("Skipping PD test, which is only supported for provider gce (not %s)",
testContext.provider))
testContext.Provider))
return
}

@@ -127,13 +127,13 @@
podClient.Delete(rwPod.Name)
podClient.Delete(host0ROPod.Name)
podClient.Delete(host1ROPod.Name)
detachPD(host0Name, diskName, testContext.gceConfig.Zone)
detachPD(host1Name, diskName, testContext.gceConfig.Zone)
deletePD(diskName, testContext.gceConfig.Zone)
detachPD(host0Name, diskName, testContext.GCEConfig.Zone)
detachPD(host1Name, diskName, testContext.GCEConfig.Zone)
deletePD(diskName, testContext.GCEConfig.Zone)
}()

By(fmt.Sprintf("creating PD %q", diskName))
expectNoError(createPD(diskName, testContext.gceConfig.Zone), "Error creating PD")
expectNoError(createPD(diskName, testContext.GCEConfig.Zone), "Error creating PD")

By("submitting rwPod to ensure PD is formatted")
_, err := podClient.Create(rwPod)
@@ -161,7 +161,7 @@

By(fmt.Sprintf("deleting PD %q", diskName))
for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
if err = deletePD(diskName, testContext.gceConfig.Zone); err != nil {
if err = deletePD(diskName, testContext.GCEConfig.Zone); err != nil {
Logf("Couldn't delete PD. Sleeping 5 seconds")
continue
}
4 changes: 2 additions & 2 deletions test/e2e/rc.go
@@ -44,12 +44,12 @@ var _ = Describe("ReplicationController", func() {
})

It("should serve a basic image on each replica with a private image", func() {
switch testContext.provider {
switch testContext.Provider {
case "gce", "gke":
ServeImageOrFail(c, "private", "gcr.io/_b_k8s_authenticated_test/serve_hostname:1.1")
default:
By(fmt.Sprintf("Skipping private variant, which is only supported for providers gce and gke (not %s)",
testContext.provider))
testContext.Provider))
}
})
})
2 changes: 1 addition & 1 deletion test/e2e/service.go
@@ -46,7 +46,7 @@ var _ = Describe("Services", func() {
})

It("should provide DNS for the cluster", func() {
if testContext.provider == "vagrant" {
if testContext.Provider == "vagrant" {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}