chore: upgrade Go to v1.19.3 (1.x) (#23941)
* chore: upgrade Go to 1.19.3

This re-runs ./generate.sh and ./checkfmt.sh to format and update
source code (this is primarily responsible for the huge diff).

* fix: update tests to reflect sorting algorithm change
bnpfeife committed Nov 28, 2022
1 parent e68b64c commit e484c4d
Showing 43 changed files with 141 additions and 159 deletions.
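
Most of the comment-only churn in the files below is consistent with Go 1.19's gofmt, which now reformats doc comments: bullet items get a leading dash and indentation, numbered lists are indented, and //go: directives are separated from the prose by a blank // line. A minimal sketch of the rewrite on a hypothetical function (not taken from this repository):

```go
package docfmt

// mergeOld documents the pre-Go 1.19 comment style that gofmt now rewrites:
// * keeps the left-hand order
// * drops duplicates
//go:noinline
func mergeOld(a, b []int) []int { return append(a, b...) }

// mergeNew shows the same comment after running Go 1.19 gofmt:
//   - keeps the left-hand order
//   - drops duplicates
//
//go:noinline
func mergeNew(a, b []int) []int { return append(a, b...) }
```

The pkg/estimator/hll and pkg/encoding/simple8b hunks below show these two transformations on real code.
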
2 changes: 1 addition & 1 deletion .circleci/config.yml
@@ -6,7 +6,7 @@ parameters:
# when updating the go version, should also update the go version in go.mod
description: docker tag for cross build container from quay.io . Created by https://github.com/influxdata/edge/tree/master/dockerfiles/cross-builder .
type: string
default: go1.18.7-f2a580ca8029f26f2c8a2002d6851967808bf96d
default: go1.19.3-7bc83382e2fdcefe13a8bf3e0367745901c0a790

workflow:
type: string
1 change: 0 additions & 1 deletion cmd/influx/cli/cli.go
@@ -1461,7 +1461,6 @@ func (f *formatter) valueBuf(i, j int, typ flux.ColType, cr flux.ColReader) []by
// * common tags sorted by label
// * other tags sorted by label
// * value
//
type orderedCols struct {
indexMap []int
cols []flux.ColMeta
2 changes: 1 addition & 1 deletion cmd/influx_tools/internal/errlist/errlist.go
@@ -22,7 +22,7 @@ func (el *ErrorList) Add(err error) {
el.errs = append(el.errs, err)
}

//Err returns whether or not an error list is an error.
// Err returns whether or not an error list is an error.
func (el *ErrorList) Err() error {
if len(el.errs) == 0 {
return nil
10 changes: 5 additions & 5 deletions cmd/influxd/run/command.go
@@ -280,11 +280,11 @@ type Options struct {

// GetConfigPath returns the config path from the options.
// It will return a path by searching in this order:
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
func (opt *Options) GetConfigPath() string {
if opt.ConfigPath != "" {
if opt.ConfigPath == os.DevNull {
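
The reflowed GetConfigPath comment above doubles as a specification of the lookup order; a minimal sketch of that order, assuming a standalone helper rather than the real *Options method (which also special-cases os.DevNull):

```go
package main

import (
	"os"
	"path/filepath"
)

// findConfigPath is a hypothetical stand-in for Options.GetConfigPath:
// the CLI value first, then INFLUXDB_CONFIG_PATH, then the first
// influxdb.conf found in the conventional directories.
func findConfigPath(cliPath string) string {
	if cliPath != "" {
		return cliPath
	}
	if p := os.Getenv("INFLUXDB_CONFIG_PATH"); p != "" {
		return p
	}
	home, _ := os.UserHomeDir()
	for _, dir := range []string{filepath.Join(home, ".influxdb"), "/etc/influxdb"} {
		p := filepath.Join(dir, "influxdb.conf")
		if _, err := os.Stat(p); err == nil {
			return p
		}
	}
	return "" // nothing found; callers fall back to built-in defaults
}

func main() { _ = findConfigPath("") }
```
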
4 changes: 2 additions & 2 deletions coordinator/points_writer.go
@@ -255,8 +255,8 @@ func (l sgList) Covers(t time.Time) bool {
// to start time. Therefore, if there are multiple shard groups that match
// this point's time they will be preferred in this order:
//
// - a shard group with the earliest end time;
// - (assuming identical end times) the shard group with the earliest start time.
// - a shard group with the earliest end time;
// - (assuming identical end times) the shard group with the earliest start time.
func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo {
if l.items.Len() == 0 {
return nil
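
The ordering described above (earliest end time, ties broken by earliest start time) can be written as a sort over the covering groups; a rough sketch with a simplified type standing in for meta.ShardGroupInfo:

```go
package main

import (
	"sort"
	"time"
)

// shardGroup is a reduced stand-in for meta.ShardGroupInfo with only the
// fields the ordering cares about.
type shardGroup struct {
	StartTime time.Time
	EndTime   time.Time
}

// shardGroupAt returns the group covering t, preferring the earliest end
// time and, for identical end times, the earliest start time.
func shardGroupAt(groups []shardGroup, t time.Time) *shardGroup {
	var covering []shardGroup
	for _, g := range groups {
		// Assumes a [StartTime, EndTime) covering interval for the sketch.
		if !t.Before(g.StartTime) && t.Before(g.EndTime) {
			covering = append(covering, g)
		}
	}
	if len(covering) == 0 {
		return nil
	}
	sort.Slice(covering, func(i, j int) bool {
		if !covering[i].EndTime.Equal(covering[j].EndTime) {
			return covering[i].EndTime.Before(covering[j].EndTime)
		}
		return covering[i].StartTime.Before(covering[j].StartTime)
	})
	return &covering[0]
}

func main() { _ = shardGroupAt(nil, time.Now()) }
```
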
4 changes: 0 additions & 4 deletions flux/stdlib/influxdata/influxdb/rules.go
@@ -671,10 +671,8 @@ func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bo
return pn, false, nil
}

//
// Push Down of window aggregates.
// ReadRangePhys |> window |> { min, max, mean, count, sum }
//
type PushDownWindowAggregateRule struct{}

func (PushDownWindowAggregateRule) Name() string {
@@ -863,10 +861,8 @@ func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (p
}), true, nil
}

//
// Push Down of group aggregates.
// ReadGroupPhys |> { count }
//
type PushDownGroupAggregateRule struct{}

func (PushDownGroupAggregateRule) Name() string {
2 changes: 0 additions & 2 deletions flux/stdlib/influxdata/influxdb/rules_test.go
@@ -1170,9 +1170,7 @@ func meanProcedureSpec() *universe.MeanProcedureSpec {
}
}

//
// Window Aggregate Testing
//
func TestPushDownWindowAggregateRule(t *testing.T) {
createRangeSpec := func() *influxdb.ReadRangePhysSpec {
return &influxdb.ReadRangePhysSpec{
2 changes: 1 addition & 1 deletion go.mod
@@ -1,6 +1,6 @@
module github.com/influxdata/influxdb

go 1.18
go 1.19

require (
collectd.org v0.3.0
37 changes: 22 additions & 15 deletions kit/platform/errors/errors.go
@@ -39,24 +39,31 @@ const (
// further help operators.
//
// To create a simple error,
// &Error{
// Code:ENotFound,
// }
//
// &Error{
// Code:ENotFound,
// }
//
// To show where the error happens, add Op.
// &Error{
// Code: ENotFound,
// Op: "bolt.FindUserByID"
// }
//
// &Error{
// Code: ENotFound,
// Op: "bolt.FindUserByID"
// }
//
// To show an error with a unpredictable value, add the value in Msg.
// &Error{
// Code: EConflict,
// Message: fmt.Sprintf("organization with name %s already exist", aName),
// }
//
// &Error{
// Code: EConflict,
// Message: fmt.Sprintf("organization with name %s already exist", aName),
// }
//
// To show an error wrapped with another error.
// &Error{
// Code:EInternal,
// Err: err,
// }.
//
// &Error{
// Code:EInternal,
// Err: err,
// }.
type Error struct {
Code string
Msg string
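
Pulling the examples in the reflowed comment together, a compile-oriented sketch of a wrapped error; the Op and Err fields and the EInternal constant are assumptions based on the examples above (only Code and Msg are visible in this hunk), and findBucket is a hypothetical caller:

```go
package main

import (
	"fmt"
	"os"

	"github.com/influxdata/influxdb/kit/platform/errors"
)

// findBucket wraps a low-level failure in the platform error type, combining
// the Code, Op, Msg, and Err patterns from the doc comment above.
func findBucket(name string) error {
	if _, err := os.Stat("/var/lib/influxdb/" + name); err != nil {
		return &errors.Error{
			Code: errors.EInternal, // assumed constant, named in the doc examples
			Op:   "example.findBucket",
			Msg:  fmt.Sprintf("bucket %q could not be checked", name),
			Err:  err,
		}
	}
	return nil
}

func main() { _ = findBucket("telegraf") }
```
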
24 changes: 13 additions & 11 deletions kit/tracing/tracing.go
@@ -19,7 +19,8 @@ import (

// LogError adds a span log for an error.
// Returns unchanged error, so useful to wrap as in:
// return 0, tracing.LogError(err)
//
// return 0, tracing.LogError(err)
func LogError(span opentracing.Span, err error) error {
if err == nil {
return nil
@@ -115,24 +115,25 @@ func (s *Span) Finish() {
// Context without parent span reference triggers root span construction.
// This function never returns nil values.
//
// Performance
// # Performance
//
// This function incurs a small performance penalty, roughly 1000 ns/op, 376 B/op, 6 allocs/op.
// Jaeger timestamp and duration precision is only µs, so this is pretty negligible.
//
// Alternatives
// # Alternatives
//
// If this performance penalty is too much, try these, which are also demonstrated in benchmark tests:
// // Create a root span
// span := opentracing.StartSpan("operation name")
// ctx := opentracing.ContextWithSpan(context.Background(), span)
//
// // Create a child span
// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc))
// ctx := opentracing.ContextWithSpan(context.Background(), span)
// // Create a root span
// span := opentracing.StartSpan("operation name")
// ctx := opentracing.ContextWithSpan(context.Background(), span)
//
// // Create a child span
// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc))
// ctx := opentracing.ContextWithSpan(context.Background(), span)
//
// // Sugar to create a child span
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name")
// // Sugar to create a child span
// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name")
func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
if ctx == nil {
panic("StartSpanFromContext called with nil context")
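
Taken together, the two reflowed comments above suggest this call pattern; doWork and step are hypothetical names, and the import path is assumed from the file's location in the tree:

```go
package main

import (
	"context"

	"github.com/influxdata/influxdb/kit/tracing"
)

// doWork starts a child span (or a root span if ctx has none), and logs any
// error on the span before returning it unchanged.
func doWork(ctx context.Context) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	if err := step(ctx); err != nil {
		return tracing.LogError(span, err)
	}
	return nil
}

// step stands in for whatever traced work doWork delegates to.
func step(ctx context.Context) error { return nil }

func main() { _ = doWork(context.Background()) }
```
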
6 changes: 3 additions & 3 deletions monitor/diagnostics/diagnostics.go
@@ -23,9 +23,9 @@ func (f ClientFunc) Diagnostics() (*Diagnostics, error) {
// the values for each column, by row. This information is never written to an InfluxDB
// system and is display-only. An example showing, say, connections follows:
//
// source_ip source_port dest_ip dest_port
// 182.1.0.2 2890 127.0.0.1 38901
// 174.33.1.2 2924 127.0.0.1 38902
// source_ip source_port dest_ip dest_port
// 182.1.0.2 2890 127.0.0.1 38901
// 174.33.1.2 2924 127.0.0.1 38902
type Diagnostics struct {
Columns []string
Rows [][]interface{}
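
The connections example above maps directly onto the two exported fields; a small sketch building the same display-only table (the import path is assumed from the file's location):

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/monitor/diagnostics"
)

func main() {
	// A display-only snapshot of connections, mirroring the table in the
	// doc comment above.
	d := &diagnostics.Diagnostics{
		Columns: []string{"source_ip", "source_port", "dest_ip", "dest_port"},
		Rows: [][]interface{}{
			{"182.1.0.2", 2890, "127.0.0.1", 38901},
			{"174.33.1.2", 2924, "127.0.0.1", 38902},
		},
	}
	fmt.Println(d.Columns, len(d.Rows))
}
```
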
6 changes: 4 additions & 2 deletions pkg/encoding/simple8b/encoding.go
@@ -468,8 +468,9 @@ func Decode(dst *[240]uint64, v uint64) (n int, err error) {

// Decode writes the uncompressed values from src to dst. It returns the number
// of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeAll(dst, src []uint64) (value int, err error) {
j := 0
for _, v := range src {
@@ -482,8 +483,9 @@ func DecodeAll(dst, src []uint64) (value int, err error) {

// DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number
// of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) {
if len(src)&7 != 0 {
return 0, errors.New("src length is not multiple of 8")
11 changes: 6 additions & 5 deletions pkg/errors/error_capture.go
@@ -2,11 +2,12 @@ package errors

// Capture is a wrapper function which can be used to capture errors from closing via a defer.
// An example:
// func Example() (err error) {
// f, _ := os.Open(...)
// defer errors.Capture(&err, f.Close)()
// ...
// return
//
// func Example() (err error) {
// f, _ := os.Open(...)
// defer errors.Capture(&err, f.Close)()
// ...
// return
//
// Doing this will result in the error from the f.Close() call being
// put in the error via a ptr, if the error is not nil
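
Completed into a runnable form, the truncated example above looks like this; writeGreeting is a hypothetical function, and the import path follows the file's location:

```go
package main

import (
	"os"

	"github.com/influxdata/influxdb/pkg/errors"
)

// writeGreeting returns any write error; if the write succeeds but f.Close
// fails, the deferred Capture propagates the close error through err.
func writeGreeting(path string) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer errors.Capture(&err, f.Close)()

	_, err = f.WriteString("hello\n")
	return err
}

func main() { _ = writeGreeting("greeting.txt") }
```
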
8 changes: 4 additions & 4 deletions pkg/estimator/hll/hll.go
@@ -4,10 +4,10 @@
//
// The differences are that the implementation in this package:
//
// * uses an AMD64 optimised xxhash algorithm instead of murmur;
// * uses some AMD64 optimisations for things like clz;
// * works with []byte rather than a Hash64 interface, to reduce allocations;
// * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
// - uses an AMD64 optimised xxhash algorithm instead of murmur;
// - uses some AMD64 optimisations for things like clz;
// - works with []byte rather than a Hash64 interface, to reduce allocations;
// - implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
//
// Based on some rough benchmarking, this implementation of HyperLogLog++ is
// around twice as fast as the github.com/clarkduvall/hyperloglog implementation.
2 changes: 1 addition & 1 deletion pkg/tar/stream.go
@@ -62,7 +62,7 @@ func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Write
return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw)
}

/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
// / Stream a single file to tw, using tarHeaderFileName instead of the actual filename
// e.g., when we want to write a *.tmp file using the original file's non-tmp name.
func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error {
h, err := tar.FileInfoHeader(f, f.Name())
3 changes: 1 addition & 2 deletions pkg/tracing/doc.go
@@ -3,7 +3,7 @@ Package tracing provides a way for capturing hierarchical traces.
To start a new trace with a root span named select
trace, span := tracing.NewTrace("select")
trace, span := tracing.NewTrace("select")
It is recommended that a span be forwarded to callees using the
context package. Firstly, create a new context with the span associated
@@ -21,6 +21,5 @@ Once the trace is complete, it may be converted to a graph with the Tree method.
The tree is intended to be used with the Walk function in order to generate
different presentations. The default Tree#String method returns a tree.
*/
package tracing
2 changes: 1 addition & 1 deletion pkg/tracing/fields/field.go
@@ -50,7 +50,7 @@ func Bool(key string, val bool) Field {
}
}

/// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
// / Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field {
return Field{
key: key,
1 change: 0 additions & 1 deletion pkg/tracing/wire/binary.go
@@ -1,6 +1,5 @@
/*
Package wire is used to serialize a trace.
*/
package wire

1 change: 1 addition & 0 deletions pkg/tracing/wire/binary.pb.go

Some generated files are not rendered by default.

8 changes: 5 additions & 3 deletions query/encode.go
@@ -60,9 +60,11 @@ func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int
// Otherwise one can decode the response body to get the error. For example:
// ```
// _, err = csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res))
// if err != nil {
// // we got some runtime error
// }
//
// if err != nil {
// // we got some runtime error
// }
//
// ```
type NoContentWithErrorDialect struct {
csv.ResultEncoderConfig
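
Expanded into a self-contained helper, the snippet from the comment above could look like this; checkNoContentError is a hypothetical name, and the csv package is the Flux CSV decoder referenced by the existing example:

```go
package main

import (
	"bytes"
	"log"

	"github.com/influxdata/flux/csv"
)

// checkNoContentError inspects the body returned by a query encoded with the
// no-content-with-error dialect and reports any runtime error it carries.
func checkNoContentError(res []byte) error {
	if len(res) == 0 {
		return nil // an empty body means the query produced no runtime error
	}
	_, err := csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res))
	return err
}

func main() {
	if err := checkNoContentError(nil); err != nil {
		log.Printf("query error: %v", err)
	}
}
```
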
4 changes: 2 additions & 2 deletions query/functions.go
@@ -1212,8 +1212,8 @@ func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint {

// FloatHoltWintersReducer forecasts a series into the future.
// This is done using the Holt-Winters damped method.
// 1. Using the series the initial values are calculated using a SSE.
// 2. The series is forecasted into the future using the iterative relations.
// 1. Using the series the initial values are calculated using a SSE.
// 2. The series is forecasted into the future using the iterative relations.
type FloatHoltWintersReducer struct {
// Season period
m int
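
For step 2 of the list above, the iterative relations behind a damped Holt forecast are the standard level/trend recurrences below; this is a generic sketch of the method with hypothetical parameter names, not the reducer's actual implementation (which also fits the initial values by minimizing an SSE, per step 1):

```go
package main

import "math"

// dampedHoltForecast applies the damped-trend (Holt) recurrences to y and
// returns an h-step-ahead forecast:
//
//	level_t = alpha*y_t + (1-alpha)*(level_{t-1} + phi*trend_{t-1})
//	trend_t = beta*(level_t - level_{t-1}) + (1-beta)*phi*trend_{t-1}
//	forecast(h) = level_n + (phi + phi^2 + ... + phi^h) * trend_n
func dampedHoltForecast(y []float64, alpha, beta, phi float64, h int) float64 {
	if len(y) < 2 {
		return math.NaN()
	}
	level, trend := y[0], y[1]-y[0] // crude initial values for the sketch
	for _, v := range y[1:] {
		prev := level
		level = alpha*v + (1-alpha)*(level+phi*trend)
		trend = beta*(level-prev) + (1-beta)*phi*trend
	}
	damp := 0.0
	for i := 1; i <= h; i++ {
		damp += math.Pow(phi, float64(i))
	}
	return level + damp*trend
}

func main() {
	_ = dampedHoltForecast([]float64{1, 2, 3, 5, 8}, 0.5, 0.1, 0.9, 3)
}
```
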
