remove deprecated Drain field from structs.Node #10202

Merged: 16 commits, Apr 1, 2021
15 changes: 1 addition & 14 deletions CHANGELOG.md
@@ -8,20 +8,7 @@ IMPROVEMENTS:
* csi: Added support for jobs to request a unique volume ID per allocation. [[GH-10136](https://github.com/hashicorp/nomad/issues/10136)]
* driver/docker: Added support for optional extra container labels. [[GH-9885](https://github.com/hashicorp/nomad/issues/9885)]
* driver/docker: Added support for configuring default logger behavior in the client configuration. [[GH-10156](https://github.com/hashicorp/nomad/issues/10156)]

BUG FIXES:
* agent: Only allow querying Prometheus formatted metrics if Prometheus is enabled within the config [[GH-10140](https://github.com/hashicorp/nomad/pull/10140)]
* api: Added missing devices block to AllocatedTaskResources [[GH-10064](https://github.com/hashicorp/nomad/pull/10064)]
* cli: Fixed a bug where non-int proxy port would panic CLI [[GH-10072](https://github.com/hashicorp/nomad/issues/10072)]
* cli: Fixed a bug where `nomad operator debug` incorrectly parsed https Consul API URLs. [[GH-10082](https://github.com/hashicorp/nomad/pull/10082)]
* cli: Remove extra linefeeds in monitor.log files written by `nomad operator debug`. [[GH-10252](https://github.com/hashicorp/nomad/issues/10252)]
* client: Fixed log formatting when killing tasks. [[GH-10135](https://github.com/hashicorp/nomad/issues/10135)]
* csi: Fixed a bug where volume with IDs that are a substring prefix of another volume could use the wrong volume for feasibility checking. [[GH-10158](https://github.com/hashicorp/nomad/issues/10158)]
* scheduler: Fixed a bug where Nomad reports negative or incorrect running children counts for periodic jobs. [[GH-10145](https://github.com/hashicorp/nomad/issues/10145)]
* scheduler: Fixed a bug where jobs requesting multiple CSI volumes could be incorrectly scheduled if only one of the volumes passed feasibility checking. [[GH-10143](https://github.com/hashicorp/nomad/issues/10143)]
* ui: Fixed the rendering of interstitial components shown after processing a dynamic application sizing recommendation. [[GH-10094](https://github.com/hashicorp/nomad/pull/10094)]

## 1.0.5 (Unreleased)
* nomad/structs: Removed deprecated Node.Drain field, added API extensions to restore it [[GH-10202](https://github.com/hashicorp/nomad/issues/10202)]

BUG FIXES:
* agent: Only allow querying Prometheus formatted metrics if Prometheus is enabled within the config [[GH-10140](https://github.com/hashicorp/nomad/pull/10140)]
19 changes: 18 additions & 1 deletion api/event_stream_test.go
@@ -7,6 +7,7 @@ import (
"time"

"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/mitchellh/mapstructure"
"github.com/stretchr/testify/require"
)

@@ -148,9 +149,25 @@ func TestEventStream_PayloadValue(t *testing.T) {
require.NoError(t, err)
}
for _, e := range event.Events {
// verify that we get a node
n, err := e.Node()
require.NoError(t, err)
require.NotEqual(t, "", n.ID)
require.NotEmpty(t, n.ID)

// perform a raw decoding and look for:
// - "ID" to make sure that raw decoding is working correctly
// - "SecretID" to make sure it's not present
raw := make(map[string]map[string]interface{}, 0)
cfg := &mapstructure.DecoderConfig{
Result: &raw,
}
dec, err := mapstructure.NewDecoder(cfg)
require.NoError(t, err)
require.NoError(t, dec.Decode(e.Payload))
require.Contains(t, raw, "Node")
rawNode := raw["Node"]
require.Equal(t, n.ID, rawNode["ID"])
require.NotContains(t, rawNode, "SecretID")
}
case <-time.After(5 * time.Second):
require.Fail(t, "failed waiting for event stream event")
85 changes: 68 additions & 17 deletions api/nodes_test.go
@@ -178,6 +178,40 @@ func TestNodes_Info(t *testing.T) {
}
}

func TestNodes_NoSecretID(t *testing.T) {
t.Parallel()
c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) {
c.DevMode = true
})
defer s.Stop()
nodes := c.Nodes()

// Get the node ID
var nodeID string
testutil.WaitForResult(func() (bool, error) {
out, _, err := nodes.List(nil)
if err != nil {
return false, err
}
if n := len(out); n != 1 {
return false, fmt.Errorf("expected 1 node, got: %d", n)
}
nodeID = out[0].ID
return true, nil
}, func(err error) {
t.Fatalf("err: %s", err)
})

// perform a raw http call and check that:
// - "ID" is present, confirming raw decoding works correctly
// - "SecretID" is not present
resp := make(map[string]interface{})
_, err := c.query("/v1/node/"+nodeID, &resp, nil)
require.NoError(t, err)
require.Equal(t, nodeID, resp["ID"])
require.Empty(t, resp["SecretID"])
}

func TestNodes_ToggleDrain(t *testing.T) {
t.Parallel()
require := require.New(t)
@@ -206,9 +240,7 @@ func TestNodes_ToggleDrain(t *testing.T) {
// Check for drain mode
out, _, err := nodes.Info(nodeID, nil)
require.Nil(err)
if out.Drain {
t.Fatalf("drain mode should be off")
}
require.False(out.Drain)

// Toggle it on
spec := &DrainSpec{
@@ -218,11 +250,36 @@
require.Nil(err)
assertWriteMeta(t, &drainOut.WriteMeta)

// Check again
out, _, err = nodes.Info(nodeID, nil)
require.Nil(err)
if out.SchedulingEligibility != NodeSchedulingIneligible {
t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingIneligible)
// Drain may have completed before we can check, use event stream
Review comment (Contributor Author): want to see Drain change from true to false
Review comment (Member): This is a nice pattern for testing... we can get a lot of mileage out of the event stream!
Review comment (Contributor): this is awesome :D

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

streamCh, err := c.EventStream().Stream(ctx, map[Topic][]string{
TopicNode: {nodeID},
}, 0, nil)
require.NoError(err)

// we expect to see the node change to Drain:true and then back to Drain:false+ineligible
var sawDraining, sawDrainComplete uint64
for sawDrainComplete == 0 {
select {
case events := <-streamCh:
require.NoError(events.Err)
for _, e := range events.Events {
node, err := e.Node()
require.NoError(err)
require.Equal(node.DrainStrategy != nil, node.Drain)
require.True(!node.Drain || node.SchedulingEligibility == NodeSchedulingIneligible) // node.Drain => "ineligible"
if node.Drain && node.SchedulingEligibility == NodeSchedulingIneligible {
sawDraining = node.ModifyIndex
} else if sawDraining != 0 && node.ModifyIndex > sawDraining &&
!node.Drain && node.SchedulingEligibility == NodeSchedulingIneligible {
sawDrainComplete = node.ModifyIndex
}
}
case <-time.After(5 * time.Second):
require.Fail("failed waiting for event stream event")
}
}

// Toggle off again
@@ -233,15 +290,9 @@ func TestNodes_ToggleDrain(t *testing.T) {
// Check again
out, _, err = nodes.Info(nodeID, nil)
require.Nil(err)
if out.Drain {
t.Fatalf("drain mode should be off")
}
if out.DrainStrategy != nil {
t.Fatalf("drain strategy should be unset")
}
if out.SchedulingEligibility != NodeSchedulingEligible {
t.Fatalf("should be eligible")
}
require.False(out.Drain)
require.Nil(out.DrainStrategy)
require.Equal(NodeSchedulingEligible, out.SchedulingEligibility)
}

func TestNodes_ToggleEligibility(t *testing.T) {
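Reviewers call out the event-stream wait above as a reusable pattern. Outside a test, the same subscription looks roughly like the sketch below; this is an illustrative example assuming a running agent and the api package's EventStream helpers used in the diff, not code from this PR.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Subscribe to node events only, starting from index 0 ("*" matches all nodes).
	eventCh, err := client.EventStream().Stream(ctx, map[api.Topic][]string{
		api.TopicNode: {"*"},
	}, 0, nil)
	if err != nil {
		log.Fatal(err)
	}

	for events := range eventCh {
		if events.Err != nil {
			log.Fatal(events.Err)
		}
		for _, e := range events.Events {
			node, err := e.Node()
			if err != nil {
				log.Fatal(err)
			}
			// On the API side, Drain is now derived from DrainStrategy by the
			// JSON extensions this PR adds.
			fmt.Printf("node %s drain=%v eligibility=%s\n",
				node.ID, node.Drain, node.SchedulingEligibility)
		}
	}
}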
5 changes: 4 additions & 1 deletion client/agent_endpoint.go
@@ -8,14 +8,17 @@ import (
"time"

"github.com/hashicorp/go-msgpack/codec"

"github.com/hashicorp/nomad/command/agent/host"
"github.com/hashicorp/nomad/command/agent/monitor"
"github.com/hashicorp/nomad/command/agent/pprof"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/jsonhandles"
"github.com/hashicorp/nomad/nomad/structs"

metrics "github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"

sframer "github.com/hashicorp/nomad/client/lib/streamframer"
cstructs "github.com/hashicorp/nomad/client/structs"
)
@@ -121,7 +124,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) {
frames := make(chan *sframer.StreamFrame, streamFramesBuffer)
errCh := make(chan error)
var buf bytes.Buffer
frameCodec := codec.NewEncoder(&buf, structs.JsonHandle)
frameCodec := codec.NewEncoder(&buf, jsonhandles.JsonHandle)

framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 1024)
framer.Run()
4 changes: 3 additions & 1 deletion client/alloc_endpoint.go
@@ -10,10 +10,12 @@ import (

metrics "github.com/armon/go-metrics"
"github.com/hashicorp/go-msgpack/codec"

"github.com/hashicorp/nomad/acl"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/jsonhandles"
"github.com/hashicorp/nomad/nomad/structs"
nstructs "github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
@@ -279,7 +281,7 @@ func newExecStream(decoder *codec.Decoder, encoder *codec.Encoder) drivers.ExecT

buf: buf,
encoder: encoder,
frameCodec: codec.NewEncoder(buf, structs.JsonHandle),
frameCodec: codec.NewEncoder(buf, jsonhandles.JsonHandle),
}
}

8 changes: 5 additions & 3 deletions client/fs_endpoint.go
@@ -17,13 +17,15 @@ import (

metrics "github.com/armon/go-metrics"
"github.com/hashicorp/go-msgpack/codec"
"github.com/hpcloud/tail/watch"

"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/client/allocdir"
sframer "github.com/hashicorp/nomad/client/lib/streamframer"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/jsonhandles"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hpcloud/tail/watch"
)

var (
@@ -237,7 +239,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) {
frames := make(chan *sframer.StreamFrame, streamFramesBuffer)
errCh := make(chan error)
var buf bytes.Buffer
frameCodec := codec.NewEncoder(&buf, structs.JsonHandle)
frameCodec := codec.NewEncoder(&buf, jsonhandles.JsonHandle)

// Create the framer
framer := sframer.NewStreamFramer(frames, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
@@ -468,7 +470,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) {

var streamErr error
buf := new(bytes.Buffer)
frameCodec := codec.NewEncoder(buf, structs.JsonHandle)
frameCodec := codec.NewEncoder(buf, jsonhandles.JsonHandle)
OUTER:
for {
select {
8 changes: 5 additions & 3 deletions command/agent/http.go
@@ -20,10 +20,12 @@ import (
"github.com/hashicorp/go-connlimit"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-msgpack/codec"
"github.com/rs/cors"

"github.com/hashicorp/nomad/helper/noxssrw"
"github.com/hashicorp/nomad/helper/tlsutil"
"github.com/hashicorp/nomad/nomad/jsonhandles"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/rs/cors"
)

const (
@@ -499,13 +501,13 @@ func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Reque
if obj != nil {
var buf bytes.Buffer
if prettyPrint {
enc := codec.NewEncoder(&buf, structs.JsonHandlePretty)
enc := codec.NewEncoder(&buf, jsonhandles.JsonHandlePretty)
err = enc.Encode(obj)
if err == nil {
buf.Write([]byte("\n"))
}
} else {
enc := codec.NewEncoder(&buf, structs.JsonHandle)
enc := codec.NewEncoder(&buf, jsonhandles.JsonHandleWithExtensions)
err = enc.Encode(obj)
}
if err != nil {
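The switch above from structs.JsonHandle to jsonhandles.JsonHandleWithExtensions is what lets the agent's HTTP responses keep carrying the legacy Drain boolean even though the field no longer exists on structs.Node: the handle runs encode-time extensions that splice derived fields back into the JSON. Below is a minimal, self-contained sketch of that idea using go-codec's InterfaceExt hook; the node and nodeExt types are illustrative stand-ins, not the PR's actual jsonhandles code.

// Sketch: add a derived Drain field back at JSON-encode time (illustrative only).
package main

import (
	"bytes"
	"fmt"
	"reflect"

	"github.com/hashicorp/go-msgpack/codec"
)

// node stands in for structs.Node after the deprecated Drain bool was removed;
// only the DrainStrategy pointer remains on the struct.
type drainStrategy struct {
	Deadline int64
}

type node struct {
	ID            string
	DrainStrategy *drainStrategy
}

// nodeExt derives the legacy Drain value from DrainStrategy while encoding, so
// JSON consumers still see the old field.
type nodeExt struct{}

func (nodeExt) ConvertExt(v interface{}) interface{} {
	var n *node
	switch t := v.(type) {
	case *node:
		n = t
	case node:
		n = &t
	default:
		return v
	}
	// Return an anonymous struct that inlines the node and adds Drain.
	return struct {
		*node
		Drain bool
	}{n, n.DrainStrategy != nil}
}

// UpdateExt would matter for decoding; this sketch only encodes.
func (nodeExt) UpdateExt(_ interface{}, _ interface{}) {}

func main() {
	h := &codec.JsonHandle{}
	// Register the extension for the node type; the tag value is unused for JSON.
	if err := h.SetInterfaceExt(reflect.TypeOf(node{}), 1, nodeExt{}); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	n := &node{ID: "node-1", DrainStrategy: &drainStrategy{Deadline: -1}}
	if err := codec.NewEncoder(&buf, h).Encode(n); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // contains "Drain":true alongside "DrainStrategy"
}

The benchmark pair added to http_test.go below measures what this extra hook costs relative to the plain handle.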
30 changes: 28 additions & 2 deletions command/agent/http_test.go
@@ -29,6 +29,8 @@ import (
"github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/hashicorp/nomad/nomad/jsonhandles"
)

// makeHTTPServer returns a test server whose logs will be written to
@@ -321,11 +323,11 @@ func testPrettyPrint(pretty string, prettyFmt bool, t *testing.T) {
var expected bytes.Buffer
var err error
if prettyFmt {
enc := codec.NewEncoder(&expected, structs.JsonHandlePretty)
enc := codec.NewEncoder(&expected, jsonhandles.JsonHandlePretty)
err = enc.Encode(r)
expected.WriteByte('\n')
} else {
enc := codec.NewEncoder(&expected, structs.JsonHandle)
enc := codec.NewEncoder(&expected, jsonhandles.JsonHandleWithExtensions)
err = enc.Encode(r)
}
if err != nil {
@@ -1295,6 +1297,30 @@ func Test_decodeBody(t *testing.T) {
}
}

// BenchmarkHTTPServer_JSONEncodingWithExtensions benchmarks the performance of
// encoding JSON objects using extensions
func BenchmarkHTTPServer_JSONEncodingWithExtensions(b *testing.B) {
benchmarkJsonEncoding(b, jsonhandles.JsonHandleWithExtensions)
}

// BenchmarkHTTPServer_JSONEncodingWithoutExtensions benchmarks the performance of
// encoding JSON objects without extensions
func BenchmarkHTTPServer_JSONEncodingWithoutExtensions(b *testing.B) {
benchmarkJsonEncoding(b, jsonhandles.JsonHandle)
}

func benchmarkJsonEncoding(b *testing.B, handle *codec.JsonHandle) {
n := mock.Node()
var buf bytes.Buffer

enc := codec.NewEncoder(&buf, handle)
for i := 0; i < b.N; i++ {
buf.Reset()
err := enc.Encode(n)
require.NoError(b, err)
}
}

func httpTest(t testing.TB, cb func(c *Config), f func(srv *TestAgent)) {
s := makeHTTPServer(t, cb)
defer s.Shutdown()
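To compare the two handles locally, the new benchmarks can be run on their own with the standard Go tooling, e.g. `go test -run '^$' -bench BenchmarkHTTPServer_JSONEncoding -benchmem ./command/agent` (the package path follows from the file location above).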
30 changes: 3 additions & 27 deletions command/agent/node_endpoint.go
@@ -2,9 +2,7 @@ package agent

import (
"net/http"
"strconv"
"strings"
"time"

"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
@@ -119,31 +117,9 @@ func (s *HTTPServer) nodeToggleDrain(resp http.ResponseWriter, req *http.Request

var drainRequest api.NodeUpdateDrainRequest

// COMPAT: Remove in 0.10. Allow the old style enable query param.
// Get the enable parameter
enableRaw := req.URL.Query().Get("enable")
var enable bool
if enableRaw != "" {
var err error
enable, err = strconv.ParseBool(enableRaw)
if err != nil {
return nil, CodedError(400, "invalid enable value")
}

// Use the force drain to have it keep the same behavior as old clients.
if enable {
drainRequest.DrainSpec = &api.DrainSpec{
Deadline: -1 * time.Second,
}
} else {
// If drain is disabled on an old client, mark the node as eligible for backwards compatibility
drainRequest.MarkEligible = true
}
} else {
err := decodeBody(req, &drainRequest)
if err != nil {
return nil, CodedError(400, err.Error())
}
err := decodeBody(req, &drainRequest)
if err != nil {
return nil, CodedError(400, err.Error())
}

args := structs.NodeUpdateDrainRequest{
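With the old COMPAT `?enable=` query-parameter handling gone (it was slated for removal in 0.10), drain toggles go exclusively through the JSON body that decodeBody parses into api.NodeUpdateDrainRequest. A rough client-side sketch of that path, assuming the api package's Nodes.UpdateDrain helper; the node ID below is a placeholder, not code from this PR.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	nodes := client.Nodes()

	nodeID := "<node-id>" // placeholder; look it up via nodes.List(nil)

	// Start draining: the DrainSpec travels in the request body, which the
	// agent decodes into api.NodeUpdateDrainRequest in nodeToggleDrain above.
	spec := &api.DrainSpec{Deadline: 5 * time.Minute}
	out, err := nodes.UpdateDrain(nodeID, spec, false, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("drain updated at index", out.LastIndex)

	// Stop draining and mark the node eligible again: nil spec, markEligible=true.
	if _, err := nodes.UpdateDrain(nodeID, nil, true, nil); err != nil {
		log.Fatal(err)
	}
}

Both calls hit the /v1/node/<id>/drain endpoint handled by nodeToggleDrain, so there is no longer any caller of the removed query-parameter branch.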