Skip to content

Commit

Permalink
PR: Fix all linter gofmt errors for v1.19
Browse files Browse the repository at this point in the history
  • Loading branch information
shahzadlone committed Feb 2, 2023
1 parent 50566df commit 7eb225b
Show file tree
Hide file tree
Showing 13 changed files with 98 additions and 78 deletions.
1 change: 0 additions & 1 deletion config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ How to use, e.g. without using a rootdir:
err := cfg.LoadWithoutRootDir()
if err != nil {
...
*/
package config

Expand Down
11 changes: 8 additions & 3 deletions db/collection.go
Original file line number Diff line number Diff line change
Expand Up @@ -886,11 +886,16 @@ func (c *collection) getTxn(ctx context.Context, readonly bool) (datastore.Txn,
}

// discardImplicitTxn is a proxy function used by the collection to execute the Discard()
// transaction function only if it's an implicit transaction.
//
// transaction function only if it's an implicit transaction.
//
// Implicit transactions are transactions that are created *during* an operation
// execution as a side effect.
//
// execution as a side effect.
//
// Explicit transactions are provided to the collection object via
// the "WithTxn(...)" function.
//
// the "WithTxn(...)" function.
func (c *collection) discardImplicitTxn(ctx context.Context, txn datastore.Txn) {
if c.txn == nil {
txn.Discard(ctx)
Expand Down
31 changes: 18 additions & 13 deletions db/collection_delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,9 @@ import (
)

// DeleteWith deletes a target document. Target can be a Filter statement,
// a single docKey, a single document, an array of docKeys, or an array of documents.
//
// a single docKey, a single document, an array of docKeys, or an array of documents.
//
// If you want more type safety, use the respective typed versions of Delete.
// Eg: DeleteWithFilter or DeleteWithKey
func (c *collection) DeleteWith(
Expand Down Expand Up @@ -251,15 +253,17 @@ func newDagDeleter(bstore datastore.DAGStore) dagDeleter {
}

// Here is what our db stores look like:
// /db
// -> block /blocks => /db/blocks
// -> datastore /data => /db/data
// -> headstore /heads => /db/heads
// -> systemstore /system => /db/system
//
// /db
// -> block /blocks => /db/blocks
// -> datastore /data => /db/data
// -> headstore /heads => /db/heads
// -> systemstore /system => /db/system
//
// For the delete operation we are concerned with:
// 1) Deleting the actual blocks (blockstore).
// 2) Deleting datastore state.
// 3) Deleting headstore state.
// 1. Deleting the actual blocks (blockstore).
// 2. Deleting datastore state.
// 3. Deleting headstore state.
func (c *collection) applyFullDelete(
ctx context.Context,
txn datastore.Txn, dockey core.PrimaryDataStoreKey) error {
Expand Down Expand Up @@ -353,10 +357,11 @@ func (d dagDeleter) run(ctx context.Context, targetCid cid.Cid) error {
return d.delete(ctx, targetCid, block)
}

// (ipld.Block
// (ipldProtobufNode{
// Data: (cbor(crdt deltaPayload)),
// Links: (_head => parentCid, fieldName => fieldCid)))
// (ipld.Block
//
// (ipldProtobufNode{
// Data: (cbor(crdt deltaPayload)),
// Links: (_head => parentCid, fieldName => fieldCid)))
func (d dagDeleter) delete(
ctx context.Context,
targetCid cid.Cid,
Expand Down
16 changes: 8 additions & 8 deletions db/fetcher/versioned.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,13 @@ var (
// to a specific version in the document's history graph, and return the fetched
// state at that point exactly.
//
// Given the following Document state graph
// # Given the following Document state graph
//
// {} --> V1 --> V2 --> V3 --> V4
//
// ^ ^
// | |
// Target Version Current State
//
// Target Version Current State
//
// A regular DocumentFetcher fetches and returns the state at V4, but the
// VersionedFetcher would step backwards through the update graph, recompose
Expand All @@ -64,11 +64,11 @@ var (
// the scanNode request planner system.
//
// Current limitations:
// - We can only return a single record from a VersionedFetcher
// instance.
// - We can't request related sub objects (at the moment, as related objects
// ids aren't in the state graphs.
// - Probably more...
//   - We can only return a single record from a VersionedFetcher
// instance.
// - We can't request related sub objects (at the moment, as related objects
// ids aren't in the state graphs.
// - Probably more...
//
// Future optimizations:
// - Incremental checkpoint/snapshotting
Expand Down
4 changes: 4 additions & 0 deletions errors/errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ func NewKV(key string, value any) KV {
// A stacktrace will be yielded if formatting with a `+`, e.g `fmt.Sprintf("%+v", err)`.
// This function will not be inlined by the compiler as it will spoil any stacktrace
// generated.
//
//go:noinline
func New(message string, keyvals ...KV) error {
return withStackTrace(message, 1, keyvals...)
Expand All @@ -54,6 +55,7 @@ func New(message string, keyvals ...KV) error {
// the given inner error, suffixing any key-value pairs provided.
// This function will not be inlined by the compiler as it will spoil any stacktrace
// generated.
//
//go:noinline
func Wrap(message string, inner error, keyvals ...KV) error {
err := withStackTrace(message, 1, keyvals...)
Expand All @@ -67,6 +69,7 @@ func Is(err, target error) bool {

// This function will not be inlined by the compiler as it will spoil any stacktrace
// generated.
//
//go:noinline
func WithStack(err error, keyvals ...KV) error {
return withStackTrace(err.Error(), 1, keyvals...)
Expand All @@ -79,6 +82,7 @@ func WithStack(err error, keyvals ...KV) error {
//
// This function will not be inlined by the compiler as it will spoil any stacktrace
// generated.
//
//go:noinline
func withStackTrace(message string, depthToSkip int, keyvals ...KV) *defraError {
stackBuffer := make([]uintptr, MaxStackDepth)
Expand Down
3 changes: 2 additions & 1 deletion net/pb/custom.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,8 @@ func (id ProtoPeerID) Size() int {
}

// ProtoAddr is a custom type used by gogo to serde raw multiaddresses into
// the ma.Multiaddr type, and back.
//
// the ma.Multiaddr type, and back.
type ProtoAddr struct {
ma.Multiaddr
}
Expand Down
1 change: 1 addition & 0 deletions net/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,7 @@ func (s *server) addPubSubTopic(dockey string) error {
}

// removePubSubTopic unsubscribes from a DocKey topic
//
//nolint:unused
func (s *server) removePubSubTopic(dockey string) error {
if s.peer.ps == nil {
Expand Down
50 changes: 26 additions & 24 deletions planner/explain.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,31 +55,33 @@ const (
// buildSimpleExplainGraph builds the explainGraph from the given top level plan.
//
// Request:
// query @explain {
// user {
// _key
// age
// name
// }
// }
//
// Response:
// {
// "data": [
// {
// "explain": {
// "selectTopNode": {
// "selectNode": {
// ...
// "scanNode": {
// ...
// }
// }
// }
// }
// }
// ]
// }
// query @explain {
// user {
// _key
// age
// name
// }
// }
//
// Response:
//
// {
// "data": [
// {
// "explain": {
// "selectTopNode": {
// "selectNode": {
// ...
// "scanNode": {
// ...
// }
// }
// }
// }
// }
// ]
// }
func buildSimpleExplainGraph(source planNode) (map[string]any, error) {
explainGraph := map[string]any{}

Expand Down
5 changes: 3 additions & 2 deletions planner/group.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,9 @@ type groupNode struct {
}

// Creates a new group node. The function is recursive and will construct the node-chain for any
// child (`_group`) collections. `groupSelect` is optional and will typically be nil if the
// child `_group` is not requested.
//
// child (`_group`) collections. `groupSelect` is optional and will typically be nil if the
// child `_group` is not requested.
func (p *Planner) GroupBy(n *mapper.GroupBy, parsed *mapper.Select, childSelects []*mapper.Select) (*groupNode, error) {
if n == nil {
return nil, nil
Expand Down
22 changes: 11 additions & 11 deletions planner/multi.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,22 +54,22 @@ type appendNode interface {
// if a single request has multiple Select statements at the
// same depth in the request.
// Eg:
// user {
// _key
// name
// friends {
// name
// }
// _version {
// cid
// }
// }
//
// user {
// _key
// name
// friends {
// name
// }
// _version {
// cid
// }
// }
//
// In this example, both the friends selection and the _version
// selection require their own planNode sub graphs to complete.
// However, they are entirely independent graphs, so they can
// be executed in parallel.
//
type parallelNode struct { // serialNode?
documentIterator
docMapper
Expand Down
3 changes: 2 additions & 1 deletion planner/planner.go
Original file line number Diff line number Diff line change
Expand Up @@ -439,7 +439,8 @@ func walkAndFindPlanType[T planNode](plan planNode) (T, bool) {
}

// explainRequest walks through the plan graph, and outputs the concrete planNodes that should
// be executed, maintaining their order in the plan graph (does not actually execute them).
//
// be executed, maintaining their order in the plan graph (does not actually execute them).
func (p *Planner) explainRequest(
ctx context.Context,
plan planNode,
Expand Down
2 changes: 1 addition & 1 deletion tests/bench/query/simple/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ import (
)

var (
//log = logging.MustNewLogger("defra.bench")
// log = logging.MustNewLogger("defra.bench")
)

func runQueryBenchGet(
Expand Down
27 changes: 14 additions & 13 deletions tests/integration/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -148,19 +148,19 @@ If this is set to true the integration test suite will instead of its normal pr
the following:
On [package] Init:
- Get the (local) latest commit from the target/parent branch // code assumes
git fetch has been done
- Check to see if a clone of that commit/branch is available in the temp dir, and
if not clone the target branch
- Check to see if there are any new .md files in the current branch's data_format_changes
dir (vs the target branch)
- Get the (local) latest commit from the target/parent branch // code assumes
git fetch has been done
- Check to see if a clone of that commit/branch is available in the temp dir, and
if not clone the target branch
- Check to see if there are any new .md files in the current branch's data_format_changes
dir (vs the target branch)
For each test:
- If new documentation detected, pass the test and exit
- Create a new (test/auto-deleted) temp dir for defra to live/run in
- Run the test setup (add initial schema, docs, updates) using the target branch (test is skipped
if test does not exist in target and is new to this branch)
- Run the test request and assert results (as per normal tests) using the current branch
- If new documentation detected, pass the test and exit
- Create a new (test/auto-deleted) temp dir for defra to live/run in
- Run the test setup (add initial schema, docs, updates) using the target branch (test is skipped
if test does not exist in target and is new to this branch)
- Run the test request and assert results (as per normal tests) using the current branch
*/
var DetectDbChanges bool
var SetupOnly bool
Expand Down Expand Up @@ -222,9 +222,10 @@ func IsDetectingDbChanges() bool {
}

// AssertPanicAndSkipChangeDetection asserts that the code of function actually panics,
// also ensures the change detection is skipped so no false fails happen.
//
// Usage: AssertPanicAndSkipChangeDetection(t, func() { executeTestCase(t, test) })
// also ensures the change detection is skipped so no false fails happen.
//
// Usage: AssertPanicAndSkipChangeDetection(t, func() { executeTestCase(t, test) })
func AssertPanicAndSkipChangeDetection(t *testing.T, f assert.PanicTestFunc) bool {
if IsDetectingDbChanges() {
// The `assert.Panics` call will falsely fail if this test is executed during
Expand Down

0 comments on commit 7eb225b

Please sign in to comment.