Create pod and delete old one after in each chunk
Luiz Felipe Takakura committed Aug 7, 2019
1 parent fee3c42 commit 0f68d5c
Showing 5 changed files with 120 additions and 105 deletions.
60 changes: 30 additions & 30 deletions api/scheduler_handler_test.go
@@ -76,7 +76,7 @@ var _ = Describe("Scheduler Handler", func() {
},
"shutdownTimeout": 180,
"autoscaling": {
"min": 100,
"min": 10,
"up": {
"delta": 10,
"trigger": {
@@ -293,7 +293,7 @@ var _ = Describe("Scheduler Handler", func() {

app.Router.ServeHTTP(recorder, request)
Expect(recorder.Code).To(Equal(http.StatusOK))
- Expect(recorder.Body.String()).To(Equal(`[{"autoscalingDownTriggerUsage":50,"autoscalingMin":100,"autoscalingUpTriggerUsage":70,"game":"game-name","name":"scheduler1","roomsCreating":2,"roomsOccupied":1,"roomsReady":1,"roomsTerminating":0,"state":"in-sync"}]`))
+ Expect(recorder.Body.String()).To(Equal(`[{"autoscalingDownTriggerUsage":50,"autoscalingMin":10,"autoscalingUpTriggerUsage":70,"game":"game-name","name":"scheduler1","roomsCreating":2,"roomsOccupied":1,"roomsReady":1,"roomsTerminating":0,"state":"in-sync"}]`))
})

It("should list empty array when there aren't schedulers", func() {
@@ -318,23 +318,23 @@ var _ = Describe("Scheduler Handler", func() {
Context("when all services are healthy", func() {
It("returns a status code of 201 and success body", func() {
mockRedisTraceWrapper.EXPECT().WithContext(gomock.Any(), mockRedisClient).Return(mockRedisClient)
- mockRedisClient.EXPECT().TxPipeline().Return(mockPipeline).Times(100)
+ mockRedisClient.EXPECT().TxPipeline().Return(mockPipeline).Times(10)
mockPipeline.EXPECT().HMSet(gomock.Any(), gomock.Any()).Do(
func(schedulerName string, statusInfo map[string]interface{}) {
Expect(statusInfo["status"]).To(Equal(models.StatusCreating))
Expect(statusInfo["lastPing"]).To(BeNumerically("~", time.Now().Unix(), 1))
},
- ).Times(100)
- mockPipeline.EXPECT().ZAdd(models.GetRoomPingRedisKey("scheduler-name"), gomock.Any()).Times(100)
- mockPipeline.EXPECT().SAdd(models.GetRoomStatusSetRedisKey("scheduler-name", "creating"), gomock.Any()).Times(100)
- mockPipeline.EXPECT().Exec().Times(100)
+ ).Times(10)
+ mockPipeline.EXPECT().ZAdd(models.GetRoomPingRedisKey("scheduler-name"), gomock.Any()).Times(10)
+ mockPipeline.EXPECT().SAdd(models.GetRoomStatusSetRedisKey("scheduler-name", "creating"), gomock.Any()).Times(10)
+ mockPipeline.EXPECT().Exec().Times(10)
MockInsertScheduler(mockDb, nil)
MockUpdateScheduler(mockDb, nil, nil)

mockRedisClient.EXPECT().
Get(models.GlobalPortsPoolKey).
Return(goredis.NewStringResult(workerPortRange, nil)).
- Times(100)
+ Times(10)

var configYaml1 models.ConfigYAML
err := yaml.Unmarshal([]byte(yamlString), &configYaml1)
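
These Times(…) counts track the fixture's `autoscaling.min` (now 10): creating a scheduler issues one pipelined status write per room at the minimum. A minimal sketch of deriving the counts from the parsed fixture instead of repeating the literal — same identifiers as this file, assuming the YAML is unmarshaled before the expectations are registered and that `models.ConfigYAML` exposes `AutoScaling.Min` as in the JSON above:

```go
// Sketch only, not part of this commit: tie expectation counts to the
// fixture's autoscaling minimum so a change like 100 -> 10 is made once.
var configYaml1 models.ConfigYAML
err := yaml.Unmarshal([]byte(yamlString), &configYaml1)
Expect(err).NotTo(HaveOccurred())

min := configYaml1.AutoScaling.Min // 10 in this fixture
mockRedisClient.EXPECT().TxPipeline().Return(mockPipeline).Times(min)
mockPipeline.EXPECT().ZAdd(models.GetRoomPingRedisKey("scheduler-name"), gomock.Any()).Times(min)
mockPipeline.EXPECT().SAdd(models.GetRoomStatusSetRedisKey("scheduler-name", "creating"), gomock.Any()).Times(min)
mockPipeline.EXPECT().Exec().Times(min)
```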
@@ -564,16 +564,16 @@ autoscaling:

It("forwards scheduler event", func() {
mockRedisTraceWrapper.EXPECT().WithContext(gomock.Any(), mockRedisClient).Return(mockRedisClient)
- mockRedisClient.EXPECT().TxPipeline().Return(mockPipeline).Times(100)
+ mockRedisClient.EXPECT().TxPipeline().Return(mockPipeline).Times(10)
mockPipeline.EXPECT().HMSet(gomock.Any(), gomock.Any()).Do(
func(schedulerName string, statusInfo map[string]interface{}) {
Expect(statusInfo["status"]).To(Equal(models.StatusCreating))
Expect(statusInfo["lastPing"]).To(BeNumerically("~", time.Now().Unix(), 1))
},
- ).Times(100)
- mockPipeline.EXPECT().ZAdd(models.GetRoomPingRedisKey("scheduler-name"), gomock.Any()).Times(100)
- mockPipeline.EXPECT().SAdd(models.GetRoomStatusSetRedisKey("scheduler-name", "creating"), gomock.Any()).Times(100)
- mockPipeline.EXPECT().Exec().Times(100)
+ ).Times(10)
+ mockPipeline.EXPECT().ZAdd(models.GetRoomPingRedisKey("scheduler-name"), gomock.Any()).Times(10)
+ mockPipeline.EXPECT().SAdd(models.GetRoomStatusSetRedisKey("scheduler-name", "creating"), gomock.Any()).Times(10)
+ mockPipeline.EXPECT().Exec().Times(10)

mockDb.EXPECT().Query(
gomock.Any(),
@@ -589,7 +589,7 @@ autoscaling:
MockInsertScheduler(mockDb, nil)
MockUpdateScheduler(mockDb, nil, nil)
mockRedisClient.EXPECT().Get(models.GlobalPortsPoolKey).
- Return(goredis.NewStringResult(workerPortRange, nil)).Times(100)
+ Return(goredis.NewStringResult(workerPortRange, nil)).Times(10)

var configYaml1 models.ConfigYAML
err := yaml.Unmarshal([]byte(yamlString), &configYaml1)
@@ -699,7 +699,7 @@ autoscaling:
},
"shutdownTimeout": 180,
"autoscaling": {
"min": 100,
"min": 10,
"up": {
"delta": 10,
"trigger": {
@@ -779,8 +779,8 @@ autoscaling:

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)
- MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml, 0)
+ MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, nil)
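
`MockCreateRooms` and `MockGetPortsFromPool` now take a trailing count argument at every call site. The helper bodies are not part of this diff, so the sketch below is an inference from the call sites, with placeholder mock types: passing 0 plausibly means "fall back to the scheduler's autoscaling minimum."

```go
// Hypothetical sketch inferred from the call sites in this diff; the real
// helper lives in the project's testing package and may differ.
func MockCreateRooms(
	mockRedisClient *MockRedisClient, // generated gomock for the Redis client
	mockPipeline *MockPipeline, // generated gomock for the Redis pipeline
	configYaml *models.ConfigYAML,
	count int, // assumed: 0 => use configYaml.AutoScaling.Min
) {
	if count == 0 {
		count = configYaml.AutoScaling.Min
	}
	mockRedisClient.EXPECT().TxPipeline().Return(mockPipeline).Times(count)
	mockPipeline.EXPECT().Exec().Times(count)
	// ...plus the HMSet/ZAdd/SAdd expectations shown in the creation tests.
}
```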
@@ -1111,8 +1111,8 @@ autoscaling:

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)
- MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml, 0)
+ MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, nil)
@@ -1202,8 +1202,8 @@ autoscaling:

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)
- MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml, 0)
+ MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, errors.New("err on db"))
@@ -2381,8 +2381,8 @@ game: game-name

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)
- MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml, 0)
+ MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, nil)
@@ -2439,8 +2439,8 @@ game: game-name

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)
- MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml, 0)
+ MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, nil)
@@ -2780,8 +2780,8 @@ game: game-name

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)
- MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml, 0)
+ MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, nil)
@@ -2873,7 +2873,7 @@ game: game-name

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml1)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml1, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, nil)
@@ -2954,8 +2954,8 @@ game: game-name

// Create new rooms
// It will use the same number of rooms as config1, and ScaleUp to new min in Watcher at AutoScale
- MockCreateRooms(mockRedisClient, mockPipeline, &configYaml)
- MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd)
+ MockCreateRooms(mockRedisClient, mockPipeline, &configYaml, 0)
+ MockGetPortsFromPool(&configYaml, mockRedisClient, nil, workerPortRange, portStart, portEnd, 0)

// Update new config on schedulers table
MockUpdateSchedulersTable(mockDb, nil)
2 changes: 1 addition & 1 deletion controller/controller.go
@@ -693,7 +693,7 @@ waitForLock:
deletedPods := []v1.Pod{}

for i, chunk := range podChunks {
l.Debugf("deleting chunk %d: %v", i, names(chunk))
l.Debugf("updating chunk %d: %v", i, names(chunk))

newlyCreatedPods, newlyDeletedPods, timedout, canceled := replacePodsAndWait(
l, roomManager, mr, clientset, db, redisClient.Client,
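
The log message changes from "deleting" to "updating" because each chunk iteration now performs both halves of the replacement: per the commit title, the new pod is created (and waited on) before its predecessor is deleted, shrinking the capacity dip during a rolling update. A minimal, self-contained sketch of that ordering — plain Go, hypothetical names, no Kubernetes client:

```go
package main

import "fmt"

// replaceChunk sketches the create-then-delete ordering this commit adopts:
// each old pod in the chunk gets its replacement created before the old pod
// is removed, so capacity never drops by a whole chunk at once.
func replaceChunk(
	chunk []string,
	create func(old string) string,
	remove func(old string),
) (created, deleted []string) {
	for _, old := range chunk {
		replacement := create(old) // create (and wait for) the new pod first
		created = append(created, replacement)
		remove(old) // only then delete the old pod
		deleted = append(deleted, old)
	}
	return created, deleted
}

func main() {
	created, deleted := replaceChunk(
		[]string{"room-a", "room-b"},
		func(old string) string { return old + "-v2" },
		func(old string) { fmt.Println("deleting", old) },
	)
	fmt.Println("created:", created)
	fmt.Println("deleted:", deleted)
}
```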