fix side effect on unit tests
henrod committed Feb 25, 2018
1 parent 8746817 commit cdbc087
Showing 2 changed files with 26 additions and 20 deletions.
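The pattern behind the fix: several specs mutated the shared, suite-level configYaml1 fixture, so one test's changes leaked into the next. Each affected spec now unmarshals its own local configYaml from yamlString. Below is a minimal, hypothetical Ginkgo reproduction of the idea; the types and fixture are stand-ins, not the repository's real models.ConfigYAML or test suite.

package api_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	yaml "gopkg.in/yaml.v2"
)

func TestSpecs(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Scheduler Handler Suite")
}

// Stand-ins for the repository's models.ConfigYAML and its yamlString fixture.
type autoScaling struct {
	Min int `yaml:"min"`
}

type configYAML struct {
	Name        string      `yaml:"name"`
	AutoScaling autoScaling `yaml:"autoscaling"`
}

const yamlString = "name: my-scheduler\nautoscaling:\n  min: 3\n"

var _ = Describe("scheduler update", func() {
	It("mutates only its own copy of the config", func() {
		// Before the fix, specs read a shared package-level configYaml1;
		// a mutation in one spec changed what every later spec observed.
		// Unmarshalling a fresh value inside each It isolates the specs.
		var configYaml configYAML
		Expect(yaml.Unmarshal([]byte(yamlString), &configYaml)).To(Succeed())

		configYaml.AutoScaling.Min = 10 // local change, no cross-spec leak
		Expect(configYaml.AutoScaling.Min).To(Equal(10))
	})
})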
39 changes: 24 additions & 15 deletions api/scheduler_handler_test.go
@@ -1945,16 +1945,19 @@ ports:
 	})

 	It("should update image", func() {
+		var configYaml models.ConfigYAML
+		err = yaml.Unmarshal([]byte(yamlString), &configYaml)
+
 		newImageName := "new-image"
-		pods, err := clientset.CoreV1().Pods(configYaml1.Name).List(metav1.ListOptions{})
+		pods, err := clientset.CoreV1().Pods(configYaml.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
-		Expect(pods.Items).To(HaveLen(configYaml1.AutoScaling.Min))
+		Expect(pods.Items).To(HaveLen(configYaml.AutoScaling.Min))

 		// Update scheduler
 		body := map[string]interface{}{"image": newImageName}
 		bts, _ := json.Marshal(body)
 		reader := strings.NewReader(string(bts))
-		url := fmt.Sprintf("/scheduler/%s/image?maxSurge=100", configYaml1.Name)
+		url := fmt.Sprintf("/scheduler/%s/image", configYaml.Name)
 		request, err = http.NewRequest("PUT", url, reader)
 		Expect(err).NotTo(HaveOccurred())
 		request.SetBasicAuth(user, pass)
@@ -1966,11 +1969,11 @@ ports:
 		MockRedisLock(mockRedisClient, lockKeyNs, lockTimeoutMs, true, nil)

 		// Remove old rooms
-		MockRemoveRoomsFromRedis(mockRedisClient, mockPipeline, pods, &configYaml1)
+		MockRemoveRoomsFromRedis(mockRedisClient, mockPipeline, pods, &configYaml)

 		// Create new rooms
 		// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
-		MockCreateRooms(mockRedisClient, mockPipeline, &configYaml1)
+		MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)

 		// Update new config on schedulers table
 		MockUpdateSchedulersTable(mockDb, nil)
@@ -1991,16 +1994,19 @@ ports:
 	})

 	It("should update image with max surge of 100%", func() {
+		var configYaml models.ConfigYAML
+		err = yaml.Unmarshal([]byte(yamlString), &configYaml)
+
 		newImageName := "new-image"
-		pods, err := clientset.CoreV1().Pods(configYaml1.Name).List(metav1.ListOptions{})
+		pods, err := clientset.CoreV1().Pods(configYaml.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
-		Expect(pods.Items).To(HaveLen(configYaml1.AutoScaling.Min))
+		Expect(pods.Items).To(HaveLen(configYaml.AutoScaling.Min))

 		// Update scheduler
 		body := map[string]interface{}{"image": newImageName}
 		bts, _ := json.Marshal(body)
 		reader := strings.NewReader(string(bts))
-		url := fmt.Sprintf("/scheduler/%s/image?maxsurge=100", configYaml1.Name)
+		url := fmt.Sprintf("/scheduler/%s/image?maxsurge=100", configYaml.Name)
 		request, err = http.NewRequest("PUT", url, reader)
 		Expect(err).NotTo(HaveOccurred())
 		request.SetBasicAuth(user, pass)
@@ -2012,11 +2018,11 @@ ports:
 		MockRedisLock(mockRedisClient, lockKeyNs, lockTimeoutMs, true, nil)

 		// Remove old rooms
-		MockRemoveRoomsFromRedis(mockRedisClient, mockPipeline, pods, &configYaml1)
+		MockRemoveRoomsFromRedis(mockRedisClient, mockPipeline, pods, &configYaml)

 		// Create new rooms
 		// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
-		MockCreateRooms(mockRedisClient, mockPipeline, &configYaml1)
+		MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)

 		// Update new config on schedulers table
 		MockUpdateSchedulersTable(mockDb, nil)
@@ -2293,6 +2299,9 @@ ports:
 	})

 	It("should set image if basicauth is not sent and tryOauthIfUnset is true", func() {
+		var configYaml models.ConfigYAML
+		err = yaml.Unmarshal([]byte(yamlString), &configYaml)
+
 		config, err := GetDefaultConfig()
 		Expect(err).NotTo(HaveOccurred())
 		config.Set("basicauth.tryOauthIfUnset", true)
@@ -2302,15 +2311,15 @@ ports:
 		app.Login = mockLogin
 		newImageName := "new-image"

-		pods, err := clientset.CoreV1().Pods(configYaml1.Name).List(metav1.ListOptions{})
+		pods, err := clientset.CoreV1().Pods(configYaml.Name).List(metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
-		Expect(pods.Items).To(HaveLen(configYaml1.AutoScaling.Min))
+		Expect(pods.Items).To(HaveLen(configYaml.AutoScaling.Min))

 		// Update scheduler
 		body := map[string]interface{}{"image": newImageName}
 		bts, _ := json.Marshal(body)
 		reader := strings.NewReader(string(bts))
-		url := fmt.Sprintf("/scheduler/%s/image", configYaml1.Name)
+		url := fmt.Sprintf("/scheduler/%s/image", configYaml.Name)
 		request, err = http.NewRequest("PUT", url, reader)
 		Expect(err).NotTo(HaveOccurred())
 		request.SetBasicAuth(user, pass)
@@ -2322,11 +2331,11 @@ ports:
 		MockRedisLock(mockRedisClient, lockKeyNs, lockTimeoutMs, true, nil)

 		// Remove old rooms
-		MockRemoveRoomsFromRedis(mockRedisClient, mockPipeline, pods, &configYaml1)
+		MockRemoveRoomsFromRedis(mockRedisClient, mockPipeline, pods, &configYaml)

 		// Create new rooms
 		// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
-		MockCreateRooms(mockRedisClient, mockPipeline, &configYaml1)
+		MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)

 		// Update new config on schedulers table
 		MockUpdateSchedulersTable(mockDb, nil)
7 changes: 2 additions & 5 deletions controller/utils.go
@@ -177,7 +177,7 @@ func createPodsAsTheyAreDeleted(
 	})

 	createdPods = []v1.Pod{}
-	logger.Debugf("waiting for pods to terminate: %#v", names(deletedPods))
+	logger.Debugf("pods to terminate: %#v", names(deletedPods))

 	timeoutTimer := time.NewTimer(timeout)
 	defer timeoutTimer.Stop()
@@ -191,10 +191,7 @@ func createPodsAsTheyAreDeleted(
 		case <-ticker.C:
 			for j := i; j < len(deletedPods); j++ {
 				pod := deletedPods[i]
-				_, err := clientset.CoreV1().Pods(configYAML.Name).Get(
-					pod.GetName(), getOptions,
-				)
-
+				_, err := clientset.CoreV1().Pods(configYAML.Name).Get(pod.GetName(), getOptions)
 				if err == nil || !strings.Contains(err.Error(), "not found") {
 					logger.WithField("pod", pod.GetName()).Debugf("pod still exists")
 					exit = false
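For context on the loop this hunk touches: createPodsAsTheyAreDeleted polls on a ticker, treating a "not found" error from the pods API as confirmation that a pod is gone, and gives up when a timer fires. Below is a condensed, hypothetical sketch of that wait pattern, assuming the 2018-era client-go signature Pods(ns).Get(name, metav1.GetOptions{}); it is not the repository's full function.

package controller

import (
	"fmt"
	"strings"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForPodsDeleted blocks until every named pod is gone ("not found")
// or the timeout elapses, checking once per tick as the diff above does.
func waitForPodsDeleted(
	clientset kubernetes.Interface,
	namespace string,
	podNames []string,
	timeout, interval time.Duration,
) error {
	timeoutTimer := time.NewTimer(timeout)
	defer timeoutTimer.Stop()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	remaining := podNames
	for len(remaining) > 0 {
		select {
		case <-timeoutTimer.C:
			return fmt.Errorf("timed out waiting for pods to terminate: %v", remaining)
		case <-ticker.C:
			stillThere := make([]string, 0, len(remaining))
			for _, name := range remaining {
				_, err := clientset.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
				// Same check as the diff: anything but a "not found"
				// error means the pod has not finished terminating.
				if err == nil || !strings.Contains(err.Error(), "not found") {
					stillThere = append(stillThere, name)
				}
			}
			remaining = stillThere
		}
	}
	return nil
}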
