diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 5132681..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("<nil>") - maxNewlineBytes = []byte("<max depth reached>\n") - maxShortBytes = []byte("<max>") - circularBytes = []byte("<already shown>") - circularShortBytes = []byte("<shown>") - invalidAngleBytes = []byte("<invalid reflect.Value>") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputs the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisfy an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods.
- if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 1b304cf..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. 
- -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. 
This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. 
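(Illustrative aside, not part of the deleted vendor files: a minimal usage sketch of the spew API documented above — Dump/Fdump/Sdump, the %v/%+v/%#v/%#+v formatter verbs, and a per-instance ConfigState. The example types, field names, and values are invented for demonstration; the import path matches this vendor tree, and only functions and config fields documented above are used.)

    package main

    import (
    	"os"

    	"github.com/davecgh/go-spew/spew"
    )

    // bar and foo are invented example types used only for this sketch.
    type bar struct {
    	Flag uint8
    	Data map[string]bool
    }

    type foo struct {
    	Name  string
    	Child *bar
    }

    func main() {
    	v := foo{Name: "example", Child: &bar{Flag: 2, Data: map[string]bool{"one": true}}}

    	// Dump style: newlines, indentation, types, and pointer addresses.
    	spew.Dump(v)
    	spew.Fdump(os.Stderr, v)
    	os.Stdout.WriteString(spew.Sdump(v))

    	// Formatter style: inline output via the custom %v/%+v/%#v/%#+v verbs.
    	spew.Printf("compact: %v -- types and addresses: %#+v\n", v, v)

    	// Per-instance configuration instead of mutating the global spew.Config.
    	cs := spew.ConfigState{Indent: "\t", MaxDepth: 2, SortKeys: true}
    	os.Stdout.WriteString(cs.Sdump(v))
    }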
- -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index fe2499d..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,481 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. 
- iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} \ No newline at end of file diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. -func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. 
-// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. 
- if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. -*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/filecoin-project/go-address/.gitignore b/vendor/github.com/filecoin-project/go-address/.gitignore deleted file mode 100644 index 48925c7..0000000 --- a/vendor/github.com/filecoin-project/go-address/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ - -.filecoin-build -.update-modules \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-address/.gitmodules b/vendor/github.com/filecoin-project/go-address/.gitmodules deleted file mode 100644 index 773dea9..0000000 --- a/vendor/github.com/filecoin-project/go-address/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "extern/filecoin-ffi"] - path = extern/filecoin-ffi - url = https://github.com/filecoin-project/filecoin-ffi.git diff --git a/vendor/github.com/filecoin-project/go-address/CONTRIBUTING.md b/vendor/github.com/filecoin-project/go-address/CONTRIBUTING.md deleted file mode 100644 index 5118774..0000000 --- a/vendor/github.com/filecoin-project/go-address/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing to this repo - -First, thank you for your interest in contributing to this project! Before you pick up your first issue and start -changing code, please: - -1. Review all documentation for the module you're interested in. -1. Look through the [issues for this repo](https://github.com/filecoin-project/go-address/issues) for relevant discussions. -1. If you have questions about an issue, post a comment in the issue. -1. If you want to submit changes that aren't covered by an issue, file a new one with your proposal, outlining what problem you found/feature you want to implement, and how you intend to implement a solution. - -For best results, before submitting a PR, make sure: -1. It has met all acceptance criteria for the issue. -1. It addresses only the one issue and does not make other, irrelevant changes. -1. Your code conforms to our coding style guide. -1. You have adequate test coverage (this should be indicated by CI results anyway). -1. If you like, check out [current PRs](https://github.com/filecoin-project/go-address/pulls) to see how others do it. 
- -Special Note: -If editing README.md, please conform to the [standard readme specification](https://github.com/RichardLitt/standard-readme/blob/master/spec.md). - -Before a PR can be merged to `master`, it must: -1. Pass continuous integration. -1. Be approved by at least two maintainers - -### Testing - -- All new code should be accompanied by unit tests. Prefer focused unit tests to integration tests for thorough validation of behaviour. Existing code is not necessarily a good model, here. -- Integration tests should test integration, not comprehensive functionality -- Tests should be placed in a separate package named `$PACKAGE_test`. For example, a test of the `chain` package should live in a package named `chain_test`. In limited situations, exceptions may be made for some "white box" tests placed in the same package as the code it tests. - -### Conventions and Style - -#### Imports -We use the following import ordering. -``` -import ( - [stdlib packages, alpha-sorted] - - [external packages] - - [go-address packages] -) -``` - -Where a package name does not match its directory name, an explicit alias is expected (`goimports` will add this for you). - -Example: - -```go -package address_test - -import ( - "bytes" - "encoding/base32" - "fmt" - "math" - "math/rand" - "strconv" - "testing" - "time" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/multiformats/go-varint" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/filecoin-project/go-crypto" - - "github.com/filecoin-project/go-address" -) -``` - -#### Comments -Comments are a communication to other developers (including your future self) to help them understand and maintain code. Good comments describe the _intent_ of the code, without repeating the procedures directly. - -- A `TODO:` comment describes a change that is desired but could not be immediately implemented. It must include a reference to a GitHub issue outlining whatever prevents the thing being done now (which could just be a matter of priority). -- A `NOTE:` comment indicates an aside, some background info, or ideas for future improvement, rather than the intent of the current code. It's often fine to document such ideas alongside the code rather than an issue (at the loss of a space for discussion). -- `FIXME`, `HACK`, `XXX` and similar tags indicating that some code is to be avoided in favour of `TODO`, `NOTE` or some straight prose. \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-address/COPYRIGHT b/vendor/github.com/filecoin-project/go-address/COPYRIGHT deleted file mode 100644 index 771e6f7..0000000 --- a/vendor/github.com/filecoin-project/go-address/COPYRIGHT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright 2019. Protocol Labs, Inc. - -This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/go-address/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-address/LICENSE-APACHE deleted file mode 100644 index 5465143..0000000 --- a/vendor/github.com/filecoin-project/go-address/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2019. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-address/LICENSE-MIT b/vendor/github.com/filecoin-project/go-address/LICENSE-MIT deleted file mode 100644 index ea532a8..0000000 --- a/vendor/github.com/filecoin-project/go-address/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2019. Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-address/Makefile b/vendor/github.com/filecoin-project/go-address/Makefile deleted file mode 100644 index 6e8b72d..0000000 --- a/vendor/github.com/filecoin-project/go-address/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: build -.PHONY: all - - -build: - go build -.PHONY: build diff --git a/vendor/github.com/filecoin-project/go-address/README.md b/vendor/github.com/filecoin-project/go-address/README.md deleted file mode 100644 index 5a7400d..0000000 --- a/vendor/github.com/filecoin-project/go-address/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# go-address -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![CircleCI](https://circleci.com/gh/filecoin-project/go-address.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-address) -[![codecov](https://codecov.io/gh/filecoin-project/go-address/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-address) - -The filecoin address type, used for identifying actors on the filecoin network, in various formats. 
- -## Install - -Install this library with `go mod` - -## Usage - -Addresses support various types of encoding formats and have constructors -for each format - -```golang -// address from ID -idAddress := NewIDAddress(id) -// address from a secp pub key -secp256k1Address := NewSecp256k1Address(pubkey) -// address from data for actor protocol -actorAddress := NewActorAddress(data) -// address from the BLS pubkey -blsAddress := NewBLSAddress(pubkey) -``` - -Serialization - -```golang -var outBuf io.writer -err := address.MarshalCBOR(outbuf) -var inBuf io.reader -err := address.UnmarshalCBOR(inbuf) -``` - -## Project-level documentation -The filecoin-project has a [community repo](https://github.com/filecoin-project/community) that documents in more detail our policies and guidelines, such as discussion forums and chat rooms and [Code of Conduct](https://github.com/filecoin-project/community/blob/master/CODE_OF_CONDUCT.md). - -## License -This repository is dual-licensed under Apache 2.0 and MIT terms. - -Copyright 2019. Protocol Labs, Inc. diff --git a/vendor/github.com/filecoin-project/go-address/SECURITY.md b/vendor/github.com/filecoin-project/go-address/SECURITY.md deleted file mode 100644 index 0e810df..0000000 --- a/vendor/github.com/filecoin-project/go-address/SECURITY.md +++ /dev/null @@ -1,9 +0,0 @@ -# Security Policy - -## Reporting a Vulnerability - -For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) - -## Reporting a non security bug - -For non-critical bugs, please simply file a GitHub issue on this repo. diff --git a/vendor/github.com/filecoin-project/go-address/address.go b/vendor/github.com/filecoin-project/go-address/address.go deleted file mode 100644 index 32aea83..0000000 --- a/vendor/github.com/filecoin-project/go-address/address.go +++ /dev/null @@ -1,408 +0,0 @@ -package address - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "math" - "strconv" - - "github.com/minio/blake2b-simd" - "github.com/multiformats/go-varint" - "golang.org/x/xerrors" - - cbg "github.com/whyrusleeping/cbor-gen" -) - -// CurrentNetwork specifies which network the address belongs to -var CurrentNetwork = Testnet - -// Address is the go type that represents an address in the filecoin network. -type Address struct{ str string } - -// Undef is the type that represents an undefined address. -var Undef = Address{} - -// Network represents which network an address belongs to. -type Network = byte - -const ( - // Mainnet is the main network. - Mainnet Network = iota - // Testnet is the test network. - Testnet -) - -// MainnetPrefix is the main network prefix. -const MainnetPrefix = "f" - -// TestnetPrefix is the test network prefix. -const TestnetPrefix = "t" - -// Protocol represents which protocol an address uses. -type Protocol = byte - -const ( - // ID represents the address ID protocol. - ID Protocol = iota - // SECP256K1 represents the address SECP256K1 protocol. - SECP256K1 - // Actor represents the address Actor protocol. - Actor - // BLS represents the address BLS protocol. - BLS - - Unknown = Protocol(255) -) - -// Protocol returns the protocol used by the address. -func (a Address) Protocol() Protocol { - if len(a.str) == 0 { - return Unknown - } - return a.str[0] -} - -// Payload returns the payload of the address. 
-func (a Address) Payload() []byte { - if len(a.str) == 0 { - return nil - } - return []byte(a.str[1:]) -} - -// Bytes returns the address as bytes. -func (a Address) Bytes() []byte { - return []byte(a.str) -} - -// String returns an address encoded as a string. -func (a Address) String() string { - str, err := encode(CurrentNetwork, a) - if err != nil { - panic(err) // I don't know if this one is okay - } - return str -} - -// Empty returns true if the address is empty, false otherwise. -func (a Address) Empty() bool { - return a == Undef -} - -// UnmarshalJSON implements the json unmarshal interface. -func (a *Address) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - - addr, err := decode(s) - if err != nil { - return err - } - *a = addr - return nil -} - -// MarshalJSON implements the json marshal interface. -func (a Address) MarshalJSON() ([]byte, error) { - return []byte(`"` + a.String() + `"`), nil -} - -func (a *Address) Scan(value interface{}) error { - switch value := value.(type) { - case string: - a1, err := decode(value) - if err != nil { - return err - } - - *a = a1 - - return nil - default: - return xerrors.New("non-string types unsupported") - } -} - -// NewIDAddress returns an address using the ID protocol. -func NewIDAddress(id uint64) (Address, error) { - if id > math.MaxInt64 { - return Undef, xerrors.New("IDs must be less than 2^63") - } - return newAddress(ID, varint.ToUvarint(id)) -} - -// NewSecp256k1Address returns an address using the SECP256K1 protocol. -func NewSecp256k1Address(pubkey []byte) (Address, error) { - return newAddress(SECP256K1, addressHash(pubkey)) -} - -// NewActorAddress returns an address using the Actor protocol. -func NewActorAddress(data []byte) (Address, error) { - return newAddress(Actor, addressHash(data)) -} - -// NewBLSAddress returns an address using the BLS protocol. -func NewBLSAddress(pubkey []byte) (Address, error) { - return newAddress(BLS, pubkey) -} - -// NewFromString returns the address represented by the string `addr`. -func NewFromString(addr string) (Address, error) { - return decode(addr) -} - -// NewFromBytes return the address represented by the bytes `addr`. -func NewFromBytes(addr []byte) (Address, error) { - if len(addr) == 0 { - return Undef, nil - } - if len(addr) == 1 { - return Undef, ErrInvalidLength - } - return newAddress(addr[0], addr[1:]) -} - -// Checksum returns the checksum of `ingest`. 
-func Checksum(ingest []byte) []byte { - return hash(ingest, checksumHashConfig) -} - -// ValidateChecksum returns true if the checksum of `ingest` is equal to `expected`> -func ValidateChecksum(ingest, expect []byte) bool { - digest := Checksum(ingest) - return bytes.Equal(digest, expect) -} - -func addressHash(ingest []byte) []byte { - return hash(ingest, payloadHashConfig) -} - -func newAddress(protocol Protocol, payload []byte) (Address, error) { - switch protocol { - case ID: - v, n, err := varint.FromUvarint(payload) - if err != nil { - return Undef, xerrors.Errorf("could not decode: %v: %w", err, ErrInvalidPayload) - } - if n != len(payload) { - return Undef, xerrors.Errorf("different varint length (v:%d != p:%d): %w", - n, len(payload), ErrInvalidPayload) - } - if v > math.MaxInt64 { - return Undef, xerrors.Errorf("id addresses must be less than 2^63: %w", ErrInvalidPayload) - } - case SECP256K1, Actor: - if len(payload) != PayloadHashLength { - return Undef, ErrInvalidPayload - } - case BLS: - if len(payload) != BlsPublicKeyBytes { - return Undef, ErrInvalidPayload - } - default: - return Undef, ErrUnknownProtocol - } - explen := 1 + len(payload) - buf := make([]byte, explen) - - buf[0] = protocol - copy(buf[1:], payload) - - return Address{string(buf)}, nil -} - -func encode(network Network, addr Address) (string, error) { - if addr == Undef { - return UndefAddressString, nil - } - var ntwk string - switch network { - case Mainnet: - ntwk = MainnetPrefix - case Testnet: - ntwk = TestnetPrefix - default: - return UndefAddressString, ErrUnknownNetwork - } - - var strAddr string - switch addr.Protocol() { - case SECP256K1, Actor, BLS: - cksm := Checksum(append([]byte{addr.Protocol()}, addr.Payload()...)) - strAddr = ntwk + fmt.Sprintf("%d", addr.Protocol()) + AddressEncoding.WithPadding(-1).EncodeToString(append(addr.Payload(), cksm[:]...)) - case ID: - i, n, err := varint.FromUvarint(addr.Payload()) - if err != nil { - return UndefAddressString, xerrors.Errorf("could not decode varint: %w", err) - } - if n != len(addr.Payload()) { - return UndefAddressString, xerrors.Errorf("payload contains additional bytes") - } - strAddr = fmt.Sprintf("%s%d%d", ntwk, addr.Protocol(), i) - default: - return UndefAddressString, ErrUnknownProtocol - } - return strAddr, nil -} - -func decode(a string) (Address, error) { - if len(a) == 0 { - return Undef, nil - } - if a == UndefAddressString { - return Undef, nil - } - if len(a) > MaxAddressStringLength || len(a) < 3 { - return Undef, ErrInvalidLength - } - - if string(a[0]) != MainnetPrefix && string(a[0]) != TestnetPrefix { - return Undef, ErrUnknownNetwork - } - - var protocol Protocol - switch a[1] { - case '0': - protocol = ID - case '1': - protocol = SECP256K1 - case '2': - protocol = Actor - case '3': - protocol = BLS - default: - return Undef, ErrUnknownProtocol - } - - raw := a[2:] - if protocol == ID { - // 19 is length of math.MaxInt64 as a string - if len(raw) > 19 { - return Undef, ErrInvalidLength - } - id, err := strconv.ParseUint(raw, 10, 63) - if err != nil { - return Undef, ErrInvalidPayload - } - return newAddress(protocol, varint.ToUvarint(id)) - } - - payloadcksm, err := AddressEncoding.WithPadding(-1).DecodeString(raw) - if err != nil { - return Undef, err - } - - if len(payloadcksm)-ChecksumHashLength < 0 { - return Undef, ErrInvalidChecksum - } - - payload := payloadcksm[:len(payloadcksm)-ChecksumHashLength] - cksm := payloadcksm[len(payloadcksm)-ChecksumHashLength:] - - if protocol == SECP256K1 || protocol == Actor { - if 
len(payload) != 20 { - return Undef, ErrInvalidPayload - } - } - - if !ValidateChecksum(append([]byte{protocol}, payload...), cksm) { - return Undef, ErrInvalidChecksum - } - - return newAddress(protocol, payload) -} - -func hash(ingest []byte, cfg *blake2b.Config) []byte { - hasher, err := blake2b.New(cfg) - if err != nil { - // If this happens sth is very wrong. - panic(fmt.Sprintf("invalid address hash configuration: %v", err)) // ok - } - if _, err := hasher.Write(ingest); err != nil { - // blake2bs Write implementation never returns an error in its current - // setup. So if this happens sth went very wrong. - panic(fmt.Sprintf("blake2b is unable to process hashes: %v", err)) // ok - } - return hasher.Sum(nil) -} - -func (a Address) MarshalBinary() ([]byte, error) { - return a.Bytes(), nil -} - -func (a *Address) UnmarshalBinary(b []byte) error { - newAddr, err := NewFromBytes(b) - if err != nil { - return err - } - *a = newAddr - return nil -} - -func (a *Address) MarshalCBOR(w io.Writer) error { - if a == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - if *a == Undef { - return fmt.Errorf("cannot marshal undefined address") - } - - if err := cbg.WriteMajorTypeHeader(w, cbg.MajByteString, uint64(len(a.str))); err != nil { - return err - } - - if _, err := io.WriteString(w, a.str); err != nil { - return err - } - - return nil -} - -func (a *Address) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - - if maj != cbg.MajByteString { - return fmt.Errorf("cbor type for address unmarshal was not byte string") - } - - if extra > 64 { - return fmt.Errorf("too many bytes to unmarshal for an address") - } - - buf := make([]byte, int(extra)) - if _, err := io.ReadFull(br, buf); err != nil { - return err - } - - addr, err := NewFromBytes(buf) - if err != nil { - return err - } - if addr == Undef { - return fmt.Errorf("cbor input should not contain empty addresses") - } - - *a = addr - - return nil -} - -func IDFromAddress(addr Address) (uint64, error) { - if addr.Protocol() != ID { - return 0, xerrors.Errorf("cannot get id from non id address") - } - - i, _, err := varint.FromUvarint(addr.Payload()) - return i, err -} diff --git a/vendor/github.com/filecoin-project/go-address/constants.go b/vendor/github.com/filecoin-project/go-address/constants.go deleted file mode 100644 index e3266df..0000000 --- a/vendor/github.com/filecoin-project/go-address/constants.go +++ /dev/null @@ -1,71 +0,0 @@ -package address - -import ( - "encoding/base32" - "errors" - - "github.com/minio/blake2b-simd" -) - -func init() { - - var err error - - TestAddress, err = NewActorAddress([]byte("satoshi")) - if err != nil { - panic(err) - } - - TestAddress2, err = NewActorAddress([]byte("nakamoto")) - if err != nil { - panic(err) - } -} - -var ( - // TestAddress is an account with some initial funds in it. - TestAddress Address - // TestAddress2 is an account with some initial funds in it. - TestAddress2 Address -) - -var ( - // ErrUnknownNetwork is returned when encountering an unknown network in an address. - ErrUnknownNetwork = errors.New("unknown address network") - - // ErrUnknownProtocol is returned when encountering an unknown protocol in an address. - ErrUnknownProtocol = errors.New("unknown address protocol") - // ErrInvalidPayload is returned when encountering an invalid address payload. 
-	ErrInvalidPayload = errors.New("invalid address payload")
-	// ErrInvalidLength is returned when encountering an address of invalid length.
-	ErrInvalidLength = errors.New("invalid address length")
-	// ErrInvalidChecksum is returned when encountering an invalid address checksum.
-	ErrInvalidChecksum = errors.New("invalid address checksum")
-)
-
-// UndefAddressString is the string used to represent an empty address when encoded to a string.
-var UndefAddressString = ""
-
-// PayloadHashLength defines the hash length taken over addresses using the Actor and SECP256K1 protocols.
-const PayloadHashLength = 20
-
-// ChecksumHashLength defines the hash length used for calculating address checksums.
-const ChecksumHashLength = 4
-
-// MaxAddressStringLength is the max length of an address encoded as a string
-// it includes the network prefix, protocol, and BLS public key
-const MaxAddressStringLength = 2 + 84
-
-// BlsPublicKeyBytes is the length of a BLS public key
-const BlsPublicKeyBytes = 48
-
-// BlsPrivateKeyBytes is the length of a BLS private key
-const BlsPrivateKeyBytes = 32
-
-var payloadHashConfig = &blake2b.Config{Size: PayloadHashLength}
-var checksumHashConfig = &blake2b.Config{Size: ChecksumHashLength}
-
-const encodeStd = "abcdefghijklmnopqrstuvwxyz234567"
-
-// AddressEncoding defines the base32 config used for address encoding and decoding.
-var AddressEncoding = base32.NewEncoding(encodeStd)
diff --git a/vendor/github.com/filecoin-project/go-address/testing.go b/vendor/github.com/filecoin-project/go-address/testing.go
deleted file mode 100644
index 9bc4137..0000000
--- a/vendor/github.com/filecoin-project/go-address/testing.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package address
-
-import (
-	"fmt"
-)
-
-// NewForTestGetter returns a closure that returns an address unique to that invocation.
-// The address is unique wrt the closure returned, not globally.
-func NewForTestGetter() func() Address {
-	i := 0
-	return func() Address {
-		s := fmt.Sprintf("address%d", i)
-		i++
-		newAddr, err := NewActorAddress([]byte(s))
-		if err != nil {
-			panic(err)
-		}
-		return newAddr
-	}
-}
diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-amt-ipld/v4/LICENSE-APACHE
deleted file mode 100644
index 14478a3..0000000
--- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/LICENSE-APACHE
+++ /dev/null
@@ -1,5 +0,0 @@
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
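As an aside on the go-address constants above: the `2 + 84` in `MaxAddressStringLength` can be reproduced from the other constants. The snippet below is only an illustrative sanity check, not part of the vendored code; it assumes (as the constant's comment states) that the longest string form is a BLS address, i.e. a 48-byte public key plus the 4-byte checksum, base32-encoded without padding, behind a 1-character network prefix and a 1-digit protocol.

```go
package main

import "fmt"

func main() {
	const (
		blsPublicKeyBytes  = 48 // mirrors BlsPublicKeyBytes
		checksumHashLength = 4  // mirrors ChecksumHashLength
	)
	payload := blsPublicKeyBytes + checksumHashLength // 52 bytes to encode
	// Unpadded base32 emits one character per 5 bits, rounded up: ceil(52*8/5) = 84.
	base32Chars := (payload*8 + 4) / 5
	// The network prefix ("f" or "t") plus the protocol digit account for the leading 2.
	fmt.Println(2 + base32Chars) // 86, i.e. MaxAddressStringLength
}
```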
diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/LICENSE-MIT b/vendor/github.com/filecoin-project/go-amt-ipld/v4/LICENSE-MIT deleted file mode 100644 index 72dc60d..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/README.md b/vendor/github.com/filecoin-project/go-amt-ipld/v4/README.md deleted file mode 100644 index 91dbd16..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# go-amt-ipld - -> Array Mapped Trie (Persistent Vector) implementation using go-ipld - -**This package is a reference implementation of the IPLD AMT used in the -Filecoin blockchain.** - -AMT is an array mapped trie, suitable for storing large arrays, including -sparse arrays. - -**See https://godoc.org/github.com/filecoin-project/go-amt-ipld for more - information and API details - -## License - -Dual MIT and Apache 2 diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/SECURITY.md b/vendor/github.com/filecoin-project/go-amt-ipld/v4/SECURITY.md deleted file mode 100644 index 0e810df..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/SECURITY.md +++ /dev/null @@ -1,9 +0,0 @@ -# Security Policy - -## Reporting a Vulnerability - -For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) - -## Reporting a non security bug - -For non-critical bugs, please simply file a GitHub issue on this repo. diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/amt.go b/vendor/github.com/filecoin-project/go-amt-ipld/v4/amt.go deleted file mode 100644 index 41f55a8..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/amt.go +++ /dev/null @@ -1,355 +0,0 @@ -package amt - -import ( - "bytes" - "context" - "fmt" - "math" - "sort" - - cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-amt-ipld/v4/internal" -) - -// MaxIndex is the maximum index for elements in the AMT. This MaxUint64-1 so we -// don't overflow MaxUint64 when computing the length. 
-const MaxIndex = math.MaxUint64 - 1 - -// Root is described in more detail in its internal serialized form, -// internal.Root -type Root struct { - bitWidth uint - height int - count uint64 - - node *node - - store cbor.IpldStore -} - -// NewAMT creates a new, empty AMT root with the given IpldStore and options. -func NewAMT(bs cbor.IpldStore, opts ...Option) (*Root, error) { - cfg := defaultConfig() - for _, opt := range opts { - if err := opt(cfg); err != nil { - return nil, err - } - } - - return &Root{ - bitWidth: cfg.bitWidth, - store: bs, - node: new(node), - }, nil -} - -// LoadAMT loads an existing AMT from the given IpldStore using the given -// root CID. An error will be returned where the AMT identified by the CID -// does not exist within the IpldStore. If the given options, or their defaults, -// do not match the AMT found at the given CID, an error will be returned. -func LoadAMT(ctx context.Context, bs cbor.IpldStore, c cid.Cid, opts ...Option) (*Root, error) { - cfg := defaultConfig() - for _, opt := range opts { - if err := opt(cfg); err != nil { - return nil, err - } - } - - var r internal.Root - if err := bs.Get(ctx, c, &r); err != nil { - return nil, err - } - - // Check the bitwidth but don't rely on it. We may add an option in the - // future to just discover the bitwidth from the AMT, but we need to be - // careful to not just trust the value. - if r.BitWidth != uint64(cfg.bitWidth) { - return nil, fmt.Errorf("expected bitwidth %d but AMT has bitwidth %d", cfg.bitWidth, r.BitWidth) - } - - // Make sure the height is sane to prevent any integer overflows later - // (e.g., height+1). While MaxUint64-1 would solve the "+1" issue, we - // might as well use 64 because the height cannot be greater than 62 - // (min width = 2, 2**64 == max elements). - if r.Height > 64 { - return nil, fmt.Errorf("height greater than 64: %d", r.Height) - } - - maxNodes := nodesForHeight(cfg.bitWidth, int(r.Height+1)) - // nodesForHeight saturates. If "max nodes" is max uint64, the maximum - // number of nodes at the previous level muss be less. This is the - // simplest way to check to see if the height is sane. - if maxNodes == math.MaxUint64 && nodesForHeight(cfg.bitWidth, int(r.Height)) == math.MaxUint64 { - return nil, fmt.Errorf("failed to load AMT: height %d out of bounds", r.Height) - } - - // If max nodes is less than the count, something is wrong. - if maxNodes < r.Count { - return nil, fmt.Errorf( - "failed to load AMT: not tall enough (%d) for count (%d)", r.Height, r.Count, - ) - } - - nd, err := newNode(r.Node, cfg.bitWidth, r.Height == 0, r.Height == 0) - if err != nil { - return nil, err - } - - return &Root{ - bitWidth: cfg.bitWidth, - height: int(r.Height), - count: r.Count, - node: nd, - store: bs, - }, nil -} - -// FromArray creates a new AMT and performs a BatchSet on it using the vals and -// options provided. Indexes from the array are used as the indexes for the same -// values in the AMT. -func FromArray(ctx context.Context, bs cbor.IpldStore, vals []cbg.CBORMarshaler, opts ...Option) (cid.Cid, error) { - r, err := NewAMT(bs, opts...) - if err != nil { - return cid.Undef, err - } - if err := r.BatchSet(ctx, vals); err != nil { - return cid.Undef, err - } - - return r.Flush(ctx) -} - -// Set will add or update entry at index i with value val. The index must be -// within lower than MaxIndex. -// -// Where val has a compatible CBORMarshaler() it will be used to serialize the -// object into CBOR. Otherwise the generic go-ipld-cbor DumbObject() will be -// used. 
-// -// Setting a new index that is greater than the current capacity of the -// existing AMT structure will result in the creation of additional nodes to -// form a structure of enough height to contain the new index. -// -// The height required to store any given index can be calculated by finding -// the lowest (width^(height+1) - 1) that is higher than the index. For example, -// a height of 1 on an AMT with a width of 8 (bitWidth of 3) can fit up to -// indexes of 8^2 - 1, or 63. At height 2, indexes up to 511 can be stored. So a -// Set operation for an index between 64 and 511 will require that the AMT have -// a height of at least 3. Where an AMT has a height less than 3, additional -// nodes will be added until the height is 3. -func (r *Root) Set(ctx context.Context, i uint64, val cbg.CBORMarshaler) error { - if i > MaxIndex { - return fmt.Errorf("index %d is out of range for the amt", i) - } - - var d cbg.Deferred - if val == nil { - d.Raw = cbg.CborNull - } else { - valueBuf := new(bytes.Buffer) - if err := val.MarshalCBOR(valueBuf); err != nil { - return err - } - d.Raw = valueBuf.Bytes() - } - - // where the index is greater than the number of elements we can fit into the - // current AMT, grow it until it will fit. - for i >= nodesForHeight(r.bitWidth, r.height+1) { - // if we have existing data, perform the re-height here by pushing down - // the existing tree into the left-most portion of a new root - if !r.node.empty() { - nd := r.node - // since all our current elements fit in the old height, we _know_ that - // they will all sit under element [0] of this new node. - r.node = &node{links: make([]*link, 1<= (MaxIndex - 1) { - return errInvalidCount - } - r.count++ - } - - return nil -} - -// BatchSet takes an array of vals and performs a Set on each of them on an -// existing AMT. Indexes from the array are used as indexes for the same values -// in the AMT. -// -// This is currently a convenience method and does not perform optimizations -// above iterative Set calls for each entry. -func (r *Root) BatchSet(ctx context.Context, vals []cbg.CBORMarshaler) error { - // TODO: there are more optimized ways of doing this method - for i, v := range vals { - if err := r.Set(ctx, uint64(i), v); err != nil { - return err - } - } - return nil -} - -// Get retrieves a value from index i. -// If the index is set, returns true and, if the `out` parameter is not nil, -// deserializes the value into that interface. Returns false if the index is not set. -func (r *Root) Get(ctx context.Context, i uint64, out cbg.CBORUnmarshaler) (bool, error) { - if i > MaxIndex { - return false, fmt.Errorf("index %d is out of range for the amt", i) - } - - // easy shortcut case, index is too large for our height, don't bother looking - // further - if i >= nodesForHeight(r.bitWidth, r.height+1) { - return false, nil - } - return r.node.get(ctx, r.store, r.bitWidth, r.height, i, out) -} - -// BatchDelete performs a bulk Delete operation on an array of indices. Each -// index in the given indices array will be removed from the AMT, if it is present. -// If `strict` is true, all indices are expected to be present, and this will return an error -// if one is not found. -// -// Returns true if the AMT was modified as a result of this operation. -// -// There is no special optimization applied to this method, it is simply a -// convenience wrapper around Delete for an array of indices. 
-func (r *Root) BatchDelete(ctx context.Context, indices []uint64, strict bool) (modified bool, err error) { - // TODO: theres a faster way of doing this, but this works for now - - // Sort by index so we can safely implement these optimizations in the future. - less := func(i, j int) bool { return indices[i] < indices[j] } - if !sort.SliceIsSorted(indices, less) { - // Copy first so we don't modify our inputs. - indices = append(indices[0:0:0], indices...) - sort.Slice(indices, less) - } - - for _, i := range indices { - found, err := r.Delete(ctx, i) - if err != nil { - return false, err - } else if strict && !found { - return false, fmt.Errorf("no such index %d", i) - } - modified = modified || found - } - return modified, nil -} - -// Delete removes an index from the AMT. -// Returns true if the index was present and removed, or false if the index -// was not set. -// -// If this delete operation leaves nodes with no remaining elements, the height -// will be reduced to fit the maximum remaining index, leaving the AMT in -// canonical form for the given set of data that it contains. -func (r *Root) Delete(ctx context.Context, i uint64) (bool, error) { - if i > MaxIndex { - return false, fmt.Errorf("index %d is out of range for the amt", i) - } - - // shortcut, index is greater than what we hold so we know it's not there - if i >= nodesForHeight(r.bitWidth, r.height+1) { - return false, nil - } - - found, err := r.node.delete(ctx, r.store, r.bitWidth, r.height, i) - if err != nil { - return false, err - } else if !found { - return false, nil - } - - // The AMT invariant dictates that for any non-empty AMT, the root node must - // not address only its left-most child node. Where a deletion has created a - // state where the current root node only consists of a link to the left-most - // child and no others, that child node must become the new root node (i.e. - // the height is reduced by 1). We perform the same check on the new root node - // such that we reduce the AMT to canonical form for this data set. - // In the extreme case, it is possible to perform a collapse from a large - // `height` to height=0 where the index being removed is very large and there - // remains no other indexes or the remaining indexes are in the range of 0 to - // bitWidth^8. - // See node.collapse() for more notes. - newHeight, err := r.node.collapse(ctx, r.store, r.bitWidth, r.height) - if err != nil { - return false, err - } - r.height = newHeight - - // Something is very wrong but there's not much we can do. So we perform - // the operation and then tell the user that something is wrong. - if r.count == 0 { - return false, errInvalidCount - } - - r.count-- - return true, nil -} - -// ForEach iterates over the entire AMT and calls the cb function for each -// entry found in the leaf nodes. The callback will receive the index and the -// value of each element. -func (r *Root) ForEach(ctx context.Context, cb func(uint64, *cbg.Deferred) error) error { - return r.node.forEachAt(ctx, r.store, r.bitWidth, r.height, 0, 0, cb) -} - -// ForEachAt iterates over the AMT beginning from the given start index. See -// ForEach for more details. -func (r *Root) ForEachAt(ctx context.Context, start uint64, cb func(uint64, *cbg.Deferred) error) error { - return r.node.forEachAt(ctx, r.store, r.bitWidth, r.height, start, 0, cb) -} - -// FirstSetIndex finds the lowest index in this AMT that has a value set for -// it. If this operation is called on an empty AMT, an ErrNoValues will be -// returned. 
-func (r *Root) FirstSetIndex(ctx context.Context) (uint64, error) { - return r.node.firstSetIndex(ctx, r.store, r.bitWidth, r.height) -} - -// Flush saves any unsaved node data and recompacts the in-memory forms of each -// node where they have been expanded for operational use. -func (r *Root) Flush(ctx context.Context) (cid.Cid, error) { - nd, err := r.node.flush(ctx, r.store, r.bitWidth, r.height) - if err != nil { - return cid.Undef, err - } - root := internal.Root{ - BitWidth: uint64(r.bitWidth), - Height: uint64(r.height), - Count: r.count, - Node: *nd, - } - return r.store.Put(ctx, &root) -} - -// Len returns the "Count" property that is stored in the root of this AMT. -// It's correctness is only guaranteed by the consistency of the build of the -// AMT (i.e. this code). A "secure" count would require iterating the entire -// tree, but if all nodes are part of a trusted structure (e.g. one where we -// control the entire build, or verify all incoming blocks from untrusted -// sources) then we ought to be able to say "count" is correct. -func (r *Root) Len() uint64 { - return r.count -} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/diff.go b/vendor/github.com/filecoin-project/go-amt-ipld/v4/diff.go deleted file mode 100644 index 105f938..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/diff.go +++ /dev/null @@ -1,389 +0,0 @@ -package amt - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - - "golang.org/x/xerrors" - - "github.com/ipfs/go-cid" - - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" -) - -// ChangeType denotes type of change in Change -type ChangeType int - -// These constants define the changes that can be applied to a DAG. -const ( - Add ChangeType = iota - Remove - Modify -) - -// Change represents a change to a DAG and contains a reference to the old and -// new CIDs. -type Change struct { - Type ChangeType - Key uint64 - Before *cbg.Deferred - After *cbg.Deferred -} - -func (ch Change) String() string { - b, _ := json.Marshal(ch) - return string(b) -} - -// Diff returns a set of changes that transform node 'a' into node 'b'. opts are applied to both prev and cur. -func Diff(ctx context.Context, prevBs, curBs cbor.IpldStore, prev, cur cid.Cid, opts ...Option) ([]*Change, error) { - prevAmt, err := LoadAMT(ctx, prevBs, prev, opts...) - if err != nil { - return nil, xerrors.Errorf("loading previous root: %w", err) - } - - prevCtx := &nodeContext{ - bs: prevBs, - bitWidth: prevAmt.bitWidth, - height: prevAmt.height, - } - - curAmt, err := LoadAMT(ctx, curBs, cur, opts...) - if err != nil { - return nil, xerrors.Errorf("loading current root: %w", err) - } - - // TODO: remove when https://github.com/filecoin-project/go-amt-ipld/issues/54 is closed. 
- if curAmt.bitWidth != prevAmt.bitWidth { - return nil, xerrors.Errorf("diffing AMTs with differing bitWidths not supported (prev=%d, cur=%d)", prevAmt.bitWidth, curAmt.bitWidth) - } - - curCtx := &nodeContext{ - bs: curBs, - bitWidth: curAmt.bitWidth, - height: curAmt.height, - } - - // edge case of diffing an empty AMT against non-empty - if prevAmt.count == 0 && curAmt.count != 0 { - return addAll(ctx, curCtx, curAmt.node, 0) - } - if prevAmt.count != 0 && curAmt.count == 0 { - return removeAll(ctx, prevCtx, prevAmt.node, 0) - } - return diffNode(ctx, prevCtx, curCtx, prevAmt.node, curAmt.node, 0) -} - -type nodeContext struct { - bs cbor.IpldStore // store containining AMT data - bitWidth uint // bit width of AMT - height int // height of node -} - -// nodesAtHeight returns the number of nodes that can be held at the context height -func (nc *nodeContext) nodesAtHeight() uint64 { - return nodesForHeight(nc.bitWidth, nc.height) -} - -func diffNode(ctx context.Context, prevCtx, curCtx *nodeContext, prev, cur *node, offset uint64) ([]*Change, error) { - if prev == nil && cur == nil { - return nil, nil - } - - if prev == nil { - return addAll(ctx, curCtx, cur, offset) - } - - if cur == nil { - return removeAll(ctx, prevCtx, prev, offset) - } - - if prevCtx.height == 0 && curCtx.height == 0 { - return diffLeaves(prev, cur, offset) - } - - var changes []*Change - - if curCtx.height > prevCtx.height { - subCount := curCtx.nodesAtHeight() - for i, ln := range cur.links { - if ln == nil || ln.cid == cid.Undef { - continue - } - - subCtx := &nodeContext{ - bs: curCtx.bs, - bitWidth: curCtx.bitWidth, - height: curCtx.height - 1, - } - - subn, err := ln.load(ctx, subCtx.bs, subCtx.bitWidth, subCtx.height) - if err != nil { - return nil, err - } - - offs := offset + (uint64(i) * subCount) - if i == 0 { - cs, err := diffNode(ctx, prevCtx, subCtx, prev, subn, offs) - if err != nil { - return nil, err - } - - changes = append(changes, cs...) - } else { - cs, err := addAll(ctx, subCtx, subn, offs) - if err != nil { - return nil, err - } - - changes = append(changes, cs...) - } - } - - return changes, nil - } - - if prevCtx.height > curCtx.height { - subCount := prevCtx.nodesAtHeight() - for i, ln := range prev.links { - if ln == nil || ln.cid == cid.Undef { - continue - } - - subCtx := &nodeContext{ - bs: prevCtx.bs, - bitWidth: prevCtx.bitWidth, - height: prevCtx.height - 1, - } - - subn, err := ln.load(ctx, subCtx.bs, subCtx.bitWidth, subCtx.height) - if err != nil { - return nil, err - } - - offs := offset + (uint64(i) * subCount) - - if i == 0 { - cs, err := diffNode(ctx, subCtx, curCtx, subn, cur, offs) - if err != nil { - return nil, err - } - - changes = append(changes, cs...) - } else { - cs, err := removeAll(ctx, subCtx, subn, offs) - if err != nil { - return nil, err - } - - changes = append(changes, cs...) 
- } - } - - return changes, nil - } - - // sanity check - if prevCtx.height != curCtx.height { - return nil, fmt.Errorf("comparing non-leaf nodes of unequal heights (%d, %d)", prevCtx.height, curCtx.height) - } - - if len(prev.links) != len(cur.links) { - return nil, fmt.Errorf("nodes have different numbers of links (prev=%d, cur=%d)", len(prev.links), len(cur.links)) - } - - if prev.links == nil || cur.links == nil { - return nil, fmt.Errorf("nodes have no links") - } - - subCount := prevCtx.nodesAtHeight() - for i := range prev.links { - // Neither previous or current links are in use - if prev.links[i] == nil && cur.links[i] == nil { - continue - } - - // Previous had link, current did not - if prev.links[i] != nil && cur.links[i] == nil { - if prev.links[i].cid == cid.Undef { - continue - } - - subCtx := &nodeContext{ - bs: prevCtx.bs, - bitWidth: prevCtx.bitWidth, - height: prevCtx.height - 1, - } - - subn, err := prev.links[i].load(ctx, subCtx.bs, subCtx.bitWidth, subCtx.height) - if err != nil { - return nil, err - } - - offs := offset + (uint64(i) * subCount) - cs, err := removeAll(ctx, subCtx, subn, offs) - if err != nil { - return nil, err - } - - changes = append(changes, cs...) - - continue - } - - // Current has link, previous did not - if prev.links[i] == nil && cur.links[i] != nil { - if cur.links[i].cid == cid.Undef { - continue - } - - subCtx := &nodeContext{ - bs: curCtx.bs, - bitWidth: curCtx.bitWidth, - height: curCtx.height - 1, - } - - subn, err := cur.links[i].load(ctx, subCtx.bs, subCtx.bitWidth, subCtx.height) - if err != nil { - return nil, err - } - - offs := offset + (uint64(i) * subCount) - cs, err := addAll(ctx, subCtx, subn, offs) - if err != nil { - return nil, err - } - - changes = append(changes, cs...) - - continue - } - - // Both previous and current have links to diff - - prevSubCtx := &nodeContext{ - bs: prevCtx.bs, - bitWidth: prevCtx.bitWidth, - height: prevCtx.height - 1, - } - - prevSubn, err := prev.links[i].load(ctx, prevSubCtx.bs, prevSubCtx.bitWidth, prevSubCtx.height) - if err != nil { - return nil, err - } - - curSubCtx := &nodeContext{ - bs: curCtx.bs, - bitWidth: curCtx.bitWidth, - height: curCtx.height - 1, - } - - curSubn, err := cur.links[i].load(ctx, curSubCtx.bs, curSubCtx.bitWidth, curSubCtx.height) - if err != nil { - return nil, err - } - - offs := offset + (uint64(i) * subCount) - - cs, err := diffNode(ctx, prevSubCtx, curSubCtx, prevSubn, curSubn, offs) - if err != nil { - return nil, err - } - - changes = append(changes, cs...) 
- } - - return changes, nil -} - -func addAll(ctx context.Context, nc *nodeContext, node *node, offset uint64) ([]*Change, error) { - var changes []*Change - err := node.forEachAt(ctx, nc.bs, nc.bitWidth, nc.height, 0, offset, func(index uint64, deferred *cbg.Deferred) error { - changes = append(changes, &Change{ - Type: Add, - Key: index, - Before: nil, - After: deferred, - }) - - return nil - }) - if err != nil { - return nil, err - } - - return changes, nil -} - -func removeAll(ctx context.Context, nc *nodeContext, node *node, offset uint64) ([]*Change, error) { - var changes []*Change - - err := node.forEachAt(ctx, nc.bs, nc.bitWidth, nc.height, 0, offset, func(index uint64, deferred *cbg.Deferred) error { - changes = append(changes, &Change{ - Type: Remove, - Key: index, - Before: deferred, - After: nil, - }) - - return nil - }) - if err != nil { - return nil, err - } - - return changes, nil -} - -func diffLeaves(prev, cur *node, offset uint64) ([]*Change, error) { - if len(prev.values) != len(cur.values) { - return nil, fmt.Errorf("node leaves have different numbers of values (prev=%d, cur=%d)", len(prev.values), len(cur.values)) - } - - var changes []*Change - for i, prevVal := range prev.values { - index := offset + uint64(i) - - curVal := cur.values[i] - if prevVal == nil && curVal == nil { - continue - } - - if prevVal == nil && curVal != nil { - changes = append(changes, &Change{ - Type: Add, - Key: index, - Before: nil, - After: curVal, - }) - - continue - } - - if prevVal != nil && curVal == nil { - changes = append(changes, &Change{ - Type: Remove, - Key: index, - Before: prevVal, - After: nil, - }) - - continue - } - - if !bytes.Equal(prevVal.Raw, curVal.Raw) { - changes = append(changes, &Change{ - Type: Modify, - Key: index, - Before: prevVal, - After: curVal, - }) - } - - } - - return changes, nil -} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/doc.go b/vendor/github.com/filecoin-project/go-amt-ipld/v4/doc.go deleted file mode 100644 index 9bac2e4..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/doc.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Package amt provides a reference implementation of the IPLD AMT (Array Mapped -Trie) used in the Filecoin blockchain. - -The AMT algorithm is similar to a HAMT -https://en.wikipedia.org/wiki/Hash_array_mapped_trie but instead presents an -array-like interface where the indexes themselves form the mapping to nodes in -the trie structure. An AMT is suitable for storing sparse array data as a -minimum amount of intermediate nodes are required to address a small number of -entries even when their indexes span a large distance. AMT is also a suitable -means of storing non-sparse array data as required, with a small amount of -storage and algorithmic overhead required to handle mapping that assumes that -some elements within any range of data may not be present. - -Algorithm Overview - -The AMT algorithm produces a tree-like graph, with a single root node -addressing a collection of child nodes which connect downward toward leaf nodes -which store the actual entries. No terminal entries are stored in intermediate -elements of the tree, unlike in a HAMT. We can divide up the AMT tree structure -into "levels" or "heights", where a height of zero contains the terminal -elements, and the maximum height of the tree contains the single root node. -Intermediate nodes are used to span across the range of indexes. - -Any AMT instance uses a fixed "width" that is consistent across the tree's -nodes. 
-An AMT's "bitWidth" dictates the width, or maximum branching factor
-(arity) of the AMT's nodes by determining how many bits of the original index
-are used to determine the index at any given level. A bitWidth of 3 (the
-default for this implementation) can generate indexes in the range of 0 to
-(2^3)-1=7, i.e. a "width" of 8. In practice, this means that an AMT with a
-bitWidth of 3 has a branching factor of _between 1 and 8_ for any node in the
-structure.
-
-Considering the minimal case: a minimal AMT contains a single node which serves
-as both the root and the leaf node and can hold zero or more elements
-(an empty AMT is possible, although it is a special case, and consists of a
-zero-length root). This minimal AMT can store array indexes from 0 to width-1
-(8 for the default bitWidth of 3) without requiring the addition of additional
-nodes. Attempts to add additional indexes beyond width-1 will result in
-additional nodes being added, forming a tree structure, in order to address the
-new elements. The minimal AMT node is said to have a height of 0. Every node in
-an AMT has a height that indicates its distance from the leaf nodes. All leaf
-nodes have a height of 0. The height of the root node dictates the overall
-height of the entire AMT. In the case of the minimal AMT, this is 0.
-
-Elements are stored in a compacted form within nodes; they are
-"position-mapped" by a bitmap field that is stored with the node. The bitmap is
-a simple byte array, where each bit represents an element of the data that can
-be stored in the node. With a width of 8, the bitmap is a single byte and up to
-8 elements can be stored in the node. The data array of a node _only stores
-elements that are present in that node_, so the array is commonly shorter than
-the maximum width. An empty AMT is a special case where the single node can
-have zero elements, therefore a zero-length data array and a bitmap of `0x00`.
-In all other cases, the data array must have between 1 and width elements.
-
-Determining the position of an index within the data array requires counting
-the number of set bits within the bitmap up to the element we are concerned
-with. If the bitmap has bits 2, 4 and 6 set, we can see that only 3 of the bits
-are set so our data array should hold 3 elements. To address index 4, we know
-that the first element will be index 2 and therefore the second will hold index
-4. This format allows us to store only the elements that are set in the node.
-
-Overflowing the single-node AMT by adding an index beyond width-1 requires
-an increase in height in order to address all elements. If an element in the
-range of width to (width*2)-1 is added, a single additional height is required,
-resulting in a new root node that is used to address two consecutive
-leaf nodes. Because we have an arity of up to width at any node, the addition
-of indexes in the range of 0 to (width^2)-1 will still require only the
-addition of a single additional height above the leaf nodes, i.e. height 1.
-
-From the width of an AMT we can derive the maximum range of indexes that can be
-contained by an AMT at any given `height` with the formula width^(height+1)-1.
-e.g. an AMT with a width of 8 and a height of 2 can address indexes 0 to
-8^(2+1)-1=511. Incrementing the height multiplies the range of indexes that can
-be contained within that structure by the width.
-
-Nodes above height 0 (non-leaf nodes) do not contain terminal elements, but
-instead, their data array contains links to child nodes.
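The bit-counting rule just described can be sketched in a few lines of Go. This is an illustrative, standalone example, not code from the deleted package; dataIndex is a hypothetical helper, and the bitmap uses the byte-array, low-bit-first layout described above.

    package main

    import (
        "fmt"
        "math/bits"
    )

    // dataIndex returns the position of index x within a node's compacted data
    // array by counting the set bits below x in the node's bitmap. It assumes
    // the bit for x is itself set.
    func dataIndex(bmap []byte, x uint) int {
        pos := 0
        for i := uint(0); i < x; i++ {
            if bmap[i/8]&(1<<(i%8)) != 0 {
                pos++
            }
        }
        return pos
    }

    func main() {
        bmap := []byte{0b01010100}            // width=8 bitmap with bits 2, 4 and 6 set
        fmt.Println(bits.OnesCount8(bmap[0])) // 3 elements stored in the data array
        fmt.Println(dataIndex(bmap, 4))       // index 4 is held at position 1, the second element
    }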
-The index compaction
-using the bitmap is the same as for leaf nodes, so each non-leaf node only
-stores as many links as it has child nodes.
-
-Because additional height is required to address larger indexes, even a
-single-element AMT will require more than one node where the index is greater
-than the width of the AMT. For a width of 8, indexes 8 to 63 require a height
-of 1, indexes 64 to 511 require a height of 2, indexes 512 to 4095 require a
-height of 3, etc.
-
-Retrieving elements from the AMT requires extracting only the portion of the
-requested index that is required at each height to determine the position in
-the data array to navigate into. When traversing through the tree, we only need
-to select from indexes 0 to width-1. To do this, we take log2(width) bits from
-the index to form a number that is between 0 and width-1. e.g. for a width of
-8, we only need 3 bits to form a number between 0 and 7, so we only consume
-3 bits per level of the AMT as we traverse. A simple method to calculate this
-at any height in the AMT (assuming bitWidth of 3, i.e. a width of 8) is:
-
-1. Calculate the maximum number of nodes (not entries) that may be present in
-a sub-tree rooted at the current height. width^height provides this number.
-e.g. at height 0, only 1 node can be present, but at height 3, we may have a
-tree of up to 512 nodes (storing up to 8^(3+1)=4096 entries).
-
-2. Divide the index by this number to find the index for this height. e.g. an
-index of 3 at height 0 will be 3/1=3, or an index of 20 at height 1 will be
-20/8=2.
-
-3. If we are at height 0, the element we want is at the data index,
-position-mapped via the bitmap.
-
-4. If we are above height 0, we need to navigate to the child element at the
-index we calculated, position-mapped via the bitmap. When traversing to the
-child, we discard the upper portion of the index that we no longer need.
-This can be achieved by a mod operation against the number-of-nodes value.
-e.g. an index of 20 at height 1 requires navigation to the element at
-position 2; when moving to that element (which is at height 0), we truncate the
-index with 20%8=4; at height 0 this index will be the index in our data
-array (position-mapped via the bitmap).
-
-In this way, each sub-tree root consumes a small slice, log2(width) bits long,
-of the original index.
-
-Adding new elements to an AMT may require up to 3 steps:
-
-1. Increasing the height to accommodate a new index if the current height is
-not sufficient to address the new index. Increasing the height requires turning
-the current root node into an intermediate node and adding a new root which
-links to the old one (repeated until the required height is reached).
-
-2. Adding any missing intermediate and leaf nodes that are required to address
-the new index. Depending on the density of existing indexes, this may require
-the addition of up to height-1 new nodes to connect the root to the required
-leaf. Sparse indexes will mean large gaps in the tree that will need filling to
-address new, equally sparse, indexes.
-
-3. Setting the element at the leaf node in the appropriate position in the data
-array and setting the appropriate bit in the bitmap.
-
-Removing elements requires a reversal of this process. Any empty node (other
-than the case of a completely empty AMT) must be removed and its parent should
-have its child link removed. This removal may recurse up the tree to remove
-many unnecessary intermediate nodes.
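The divide-and-mod traversal in the four steps above can be sketched as a standalone Go snippet. It only reproduces the worked example (index 20 at height 1 with the default bitWidth of 3) and is not the package's own nodesForHeight helper.

    package main

    import "fmt"

    func main() {
        const bitWidth = 3  // the default; width = 2^3 = 8
        index := uint64(20) // the index used in the worked example above

        // Step 1: nodes (not entries) spanned per child at height 1 is
        // width^height = 8^1 = 8.
        span := uint64(1) << (bitWidth * 1)

        // Step 2: divide to find the position at this height: 20/8 = 2.
        fmt.Println(index / span) // 2

        // Step 4: truncate with mod before descending: 20%8 = 4 is the
        // position-mapped slot in the height-0 child's data array.
        fmt.Println(index % span) // 4
    }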
-The root node may also be removed if the
-current height is no longer necessary to contain the range of indexes still in
-the AMT. This can be easily determined if _only_ the first bit of the root's
-bitmap is set, meaning only the left-most child is present; that child will
-become the new root node (repeated until the new root has more than the first
-bit set or a height of 0, the single-node case).
-
-Further Reading
-
-See https://github.com/ipld/specs/blob/master/data-structures/hashmap.md for a
-description of a HAMT algorithm, and
-https://github.com/ipld/specs/blob/master/data-structures/vector.md for a
-description of an algorithm similar to an AMT that doesn't support internal
-node compression and therefore doesn't support sparse arrays.
-
-Usage Considerations
-
-Unlike a HAMT, the AMT algorithm doesn't benefit from randomness introduced by
-a hash algorithm. Therefore, where user input can influence indexes,
-larger-than-necessary tree structures may present risks, in addition to the
-challenge imposed by having a strict upper limit on the indexes addressable by
-the AMT. A width of 8, using 64-bit integers for indexing,
-allows for a tree height of up to 64/log2(8)=21 (i.e. a width of 8 has a
-bitWidth of 3, dividing the 64 bits of the uint into 21 separate per-height
-indexes). Careful placement of indexes could create extremely sub-optimal forms
-with large heights connecting leaf nodes that are sparsely packed. The overhead
-of the large number of intermediate nodes required to connect leaf nodes in
-AMTs that contain high indexes can be abused to create perverse forms that
-contain large numbers of nodes to store a minimal number of elements.
-
-Minimal numbers of nodes will be created where indexes are all in the lower
-range. The optimal case for an AMT is contiguous index values starting from
-zero. As larger indexes are introduced that span beyond the current maximum,
-more nodes are required to address the new entries _and_ the existing
-lower-index entries. Consider a case where a width=8 AMT is only addressing
-indexes less than 8 and therefore requires only a single node. The introduction
-of a single index within 8 of the maximum 64-bit unsigned integer range will
-require the new root to have a height of 21 and have enough connecting nodes
-between it and both the existing elements and the new upper index. This pattern
-of behavior may be acceptable if there is significant density of entries under
-a particular maximum index.
-
-There is a direct relationship between the sparseness of index values and the
-number of nodes required to address the entries. This should be the key
-consideration when determining whether an AMT is a suitable data structure for
-a given application.
-
-*/
-package amt
diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/internal/cbor_gen.go b/vendor/github.com/filecoin-project/go-amt-ipld/v4/internal/cbor_gen.go
deleted file mode 100644
index 3477b4f..0000000
--- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/internal/cbor_gen.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
- -package internal - -import ( - "fmt" - "io" - - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufRoot = []byte{132} - -func (t *Root) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufRoot); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.BitWidth (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.BitWidth)); err != nil { - return err - } - - // t.Height (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Height)); err != nil { - return err - } - - // t.Count (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Count)); err != nil { - return err - } - - // t.Node (internal.Node) (struct) - if err := t.Node.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Root) UnmarshalCBOR(r io.Reader) error { - *t = Root{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BitWidth (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.BitWidth = uint64(extra) - - } - // t.Height (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Height = uint64(extra) - - } - // t.Count (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Count = uint64(extra) - - } - // t.Node (internal.Node) (struct) - - { - - if err := t.Node.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Node: %w", err) - } - - } - return nil -} - -var lengthBufNode = []byte{131} - -func (t *Node) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufNode); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Bmap ([]uint8) (slice) - if len(t.Bmap) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Bmap was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Bmap))); err != nil { - return err - } - - if _, err := w.Write(t.Bmap[:]); err != nil { - return err - } - - // t.Links ([]cid.Cid) (slice) - if len(t.Links) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Links was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Links))); err != nil { - return err - } - for _, v := range t.Links { - if err := cbg.WriteCidBuf(scratch, w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Links: %w", err) - } - } - - // t.Values ([]*typegen.Deferred) (slice) - if len(t.Values) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Values was too long") - } - - if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Values))); err != nil { - return err - } - for _, v := range t.Values { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *Node) UnmarshalCBOR(r io.Reader) error { - *t = Node{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Bmap ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Bmap: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Bmap = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Bmap[:]); err != nil { - return err - } - // t.Links ([]cid.Cid) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Links: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Links = make([]cid.Cid, extra) - } - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("reading cid field t.Links failed: %w", err) - } - t.Links[i] = c - } - - // t.Values ([]*typegen.Deferred) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Values: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Values = make([]*cbg.Deferred, extra) - } - - for i := 0; i < int(extra); i++ { - - var v cbg.Deferred - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Values[i] = &v - } - - return nil -} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/internal/internal.go b/vendor/github.com/filecoin-project/go-amt-ipld/v4/internal/internal.go deleted file mode 100644 index 452b2ee..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/internal/internal.go +++ /dev/null @@ -1,78 +0,0 @@ -package internal - -import ( - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" -) - -// Node represents any node within the AMT, including the root, intermediate -// and leaf nodes. For the minimal case of AMT, there may be a single Node -// containing all data. As the highest-index grows, more intermediate nodes -// are added. -// -// A Node will strictly either be a leaf node (height 0) or a non-leaf (root -// or intermediate, height 1). Leaf nodes contain an array of one or more -// Values, where non-leaf nodes contain an array of one or more Links to child -// nodes. -// -// The Bmap (bitmap) has the same number of bits as the "width" of the AMT -// (bitWidth^2), where each bit in the bitmap indicates the presence (1) or -// absence (0) of a value or link to a child node. In this way, the serialized -// form, and in-memory form of a Node contains only the value or links present. -// There must be at least one value for height=0 nodes and at least one link for -// height>0 nodes. Nodes with no links or values are invalid and the AMT will -// not have canonical form. 
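As a rough illustration of the invariants in this comment (a node is strictly leaf or non-leaf, and the bitmap's set-bit count must match the number of stored values or links), here is a standalone sketch; checkNode is a hypothetical helper and not the package's own validation, which is performed by newNode in node.go further below.

    package main

    import (
        "errors"
        "fmt"
        "math/bits"
    )

    // checkNode sketches the invariants for a serialized node: exactly one of
    // links/values is populated, and the bitmap's popcount matches that count.
    func checkNode(bmap []byte, numLinks, numValues int) error {
        if numLinks > 0 && numValues > 0 {
            return errors.New("node has both links and values")
        }
        set := 0
        for _, b := range bmap {
            set += bits.OnesCount8(b)
        }
        if set != numLinks+numValues {
            return fmt.Errorf("bitmap has %d bits set, node stores %d entries", set, numLinks+numValues)
        }
        return nil
    }

    func main() {
        // A height-0 node with bits 2, 4 and 6 set must carry exactly 3 values.
        fmt.Println(checkNode([]byte{0b01010100}, 0, 3)) // <nil>
    }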
-// -// Each node is serialized in the following form, described as an IPLD Schema: -// -// type Node struct { -// bmap Bytes -// links [&Node] -// values [Any] -// } representation tuple -// -// Where bmap is strictly a byte array of length (bitWidth^2)/8 and the links -// and values arrays are between zero and the width of this AMT (bitWidth^2). -// One of links or values arrays must contain zero elements and one must contain -// at least one element since a node is strictly either a leaf or a non-leaf. -type Node struct { - Bmap []byte - Links []cid.Cid - Values []*cbg.Deferred -} - -// Root is the single entry point for this AMT. It is serialized with an inner -// root Node element. -// -// The bitWidth property dictates the number of bits used to generate an index -// at each level from the addressible index supplied by the user. -// -// The height property is essential for understanding how deep to navigate to -// value-holding leaf nodes and therefore how many bits of an index will be -// required for navigation. -// -// The count property is maintained during ongoing mutation of the AMT and can -// be used as a fast indicator of the size of the structure. It is assumed to -// be correct if the nodes of the AMT were part of a trusted construction or -// have been verified. It is not essential to the construction or navigation of -// the AMT but is helpful for fast Len() calls. -// Performing a secure count would require navigating through all leaf nodes -// and adding up the number of occupied slots. -// -// The root is serialized in the following form, described as an IPLD Schema: -// -// type Root struct { -// bitWidth Int -// height Int -// count Int -// node Node -// } representation tuple -// -// Where bitWidth, height and count are unsigned integers and Node is the -// initial root node, see below. -type Root struct { - BitWidth uint64 - Height uint64 - Count uint64 - Node Node -} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/link.go b/vendor/github.com/filecoin-project/go-amt-ipld/v4/link.go deleted file mode 100644 index 82bb4fc..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/link.go +++ /dev/null @@ -1,32 +0,0 @@ -package amt - -import ( - "context" - - "github.com/filecoin-project/go-amt-ipld/v4/internal" - cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" -) - -type link struct { - cid cid.Cid - - cached *node - dirty bool -} - -func (l *link) load(ctx context.Context, bs cbor.IpldStore, bitWidth uint, height int) (*node, error) { - if l.cached == nil { - var nd internal.Node - if err := bs.Get(ctx, l.cid, &nd); err != nil { - return nil, err - } - - n, err := newNode(nd, bitWidth, false, height == 0) - if err != nil { - return nil, err - } - l.cached = n - } - return l.cached, nil -} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/node.go b/vendor/github.com/filecoin-project/go-amt-ipld/v4/node.go deleted file mode 100644 index b0f528c..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/node.go +++ /dev/null @@ -1,529 +0,0 @@ -package amt - -import ( - "bytes" - "context" - "errors" - "fmt" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-amt-ipld/v4/internal" -) - -// node is described in more detail in its internal serialized form, -// internal.Node. 
This form contains a fully expanded form of internal.Node -// where the Bmap is used to expand the contracted form of either Values (leaf) -// or Links (non-leaf) for ease of addressing. -// Both properties may be nil if the node is empty (a root node). -type node struct { - links []*link - values []*cbg.Deferred -} - -var ( - errEmptyNode = errors.New("unexpected empty amt node") - errUndefinedCID = errors.New("amt node has undefined CID") - errLinksAndValues = errors.New("amt node has both links and values") - errLeafUnexpected = errors.New("amt leaf not expected at height") - errLeafExpected = errors.New("amt expected at height") - errInvalidCount = errors.New("amt count does not match number of elements") -) - -// the number of bytes required such that there is a single bit for each element -// in the links or value array. This is (bitWidth^2)/8. -func bmapBytes(bitWidth uint) int { - if bitWidth <= 3 { - return 1 - } - return 1 << (bitWidth - 3) -} - -// Create a new from a serialized form. This operation takes an internal.Node -// and returns a node. internal.Node uses bitmap compaction of links or values -// arrays, while node uses the expanded form. This method performs the expansion -// such that we can use simple addressing of this node's child elements. -func newNode(nd internal.Node, bitWidth uint, allowEmpty, expectLeaf bool) (*node, error) { - if len(nd.Links) > 0 && len(nd.Values) > 0 { - // malformed AMT, a node cannot be both leaf and non-leaf - return nil, errLinksAndValues - } - - // strictly require the bitmap to be the correct size for the given bitWidth - if expWidth := bmapBytes(bitWidth); expWidth != len(nd.Bmap) { - return nil, fmt.Errorf( - "expected bitfield to be %d bytes long, found bitfield with %d bytes", - expWidth, len(nd.Bmap), - ) - } - - width := uint(1 << bitWidth) - i := 0 - n := new(node) - if len(nd.Values) > 0 { // leaf node, height=0 - if !expectLeaf { - return nil, errLeafUnexpected - } - n.values = make([]*cbg.Deferred, width) - for x := uint(0); x < width; x++ { - // check if this value exists in the bitmap, pull it out of the compacted - // list if it does - if nd.Bmap[x/8]&(1<<(x%8)) > 0 { - if i >= len(nd.Values) { - // too many bits were set in the bitmap for the number of values - // available - return nil, fmt.Errorf("expected at least %d values, found %d", i+1, len(nd.Values)) - } - n.values[x] = nd.Values[i] - i++ - } - } - if i != len(nd.Values) { - // the number of bits set in the bitmap was not the same as the number of - // values in the array - return nil, fmt.Errorf("expected %d values, got %d", i, len(nd.Values)) - } - } else if len(nd.Links) > 0 { - // non-leaf node, height>0 - if expectLeaf { - return nil, errLeafExpected - } - - n.links = make([]*link, width) - for x := uint(0); x < width; x++ { - // check if this child link exists in the bitmap, pull it out of the - // compacted list if it does - if nd.Bmap[x/8]&(1<<(x%8)) > 0 { - if i >= len(nd.Links) { - // too many bits were set in the bitmap for the number of values - // available - return nil, fmt.Errorf("expected at least %d links, found %d", i+1, len(nd.Links)) - } - c := nd.Links[i] - if !c.Defined() { - return nil, errUndefinedCID - } - // TODO: check link hash function. 
- prefix := c.Prefix() - if prefix.Codec != cid.DagCBOR { - return nil, fmt.Errorf("internal amt nodes must be cbor, found %d", prefix.Codec) - } - n.links[x] = &link{cid: c} - i++ - } - } - if i != len(nd.Links) { - // the number of bits set in the bitmap was not the same as the number of - // values in the array - return nil, fmt.Errorf("expected %d links, got %d", i, len(nd.Links)) - } - } else if !allowEmpty { // only THE empty AMT case can allow this - return nil, errEmptyNode - } - return n, nil -} - -// collapse occurs when we only have the single child node. If this is the case -// we need to reduce height by one. Continue down the tree, reducing height -// until we're either at a single height=0 node or we have something other than -// a single child node. -func (nd *node) collapse(ctx context.Context, bs cbor.IpldStore, bitWidth uint, height int) (int, error) { - // No links at all? - if nd.links == nil { - return 0, nil - } - - // If we have any links going "to the right", we can't collapse any - // more. - for _, l := range nd.links[1:] { - if l != nil { - return height, nil - } - } - - // If we have _no_ links, we've collapsed everything. - if nd.links[0] == nil { - return 0, nil - } - - // only one child, collapse it. - - subn, err := nd.links[0].load(ctx, bs, bitWidth, height-1) - if err != nil { - return 0, err - } - - // Collapse recursively. - newHeight, err := subn.collapse(ctx, bs, bitWidth, height-1) - if err != nil { - return 0, err - } - - *nd = *subn - - return newHeight, nil -} - -// does this node contain any child nodes or values? -func (nd *node) empty() bool { - for _, l := range nd.links { - if l != nil { - return false - } - } - for _, v := range nd.values { - if v != nil { - return false - } - } - return true -} - -// Recursive get() called through the tree in order to retrieve values from -// leaf nodes. We start at the root and navigate until height=0 where the -// entries themselves should exist. At any point in the navigation we can -// assert that a value does not exist in this AMT if an expected intermediate -// doesn't exist, so we don't need to do full height traversal for many cases -// where we don't have that index. -func (n *node) get(ctx context.Context, bs cbor.IpldStore, bitWidth uint, height int, i uint64, out cbg.CBORUnmarshaler) (bool, error) { - // height=0 means we're operating on a leaf node where the entries themselves - // are stores, we have a `set` so it must exist if the node is correctly - // formed - if height == 0 { - d := n.getValue(i) - found := d != nil - var err error - if found && out != nil { - err = out.UnmarshalCBOR(bytes.NewReader(d.Raw)) - } - return found, err - } - - // Non-leaf case where we need to navigate further down toward the correct - // leaf by consuming some of the provided index to form the index at this - // height and passing the remainder down. - // The calculation performed is to divide the addressible indexes of each - // child node such that each child has the ability to contain that range of - // indexes somewhere in its graph. e.g. at height=1 for bitWidth=3, the total - // addressible index space we can contain is in the range of 0 to - // `(bitWidth^2) ^ (height+1) = 8^2 = 64`. Where each child node can contain - // 64/8 of indexes. This is true regardless of the position in the overall - // AMT and original index from the Get() operation because we modify the index - // before passing it to lower nodes to remove the bits relevant to higher - // addressing. e.g. 
at height=1, a call to any child's get() will receive an - // index in the range of 0 to bitWidth^2. - nfh := nodesForHeight(bitWidth, height) - ln := n.getLink(i / nfh) - if ln == nil { - // This can occur at any point in the traversal, not just height=0, it just - // means that the higher up it occurs that a larger range of indexes in this - // region don't exist. - return false, nil - } - subn, err := ln.load(ctx, bs, bitWidth, height-1) - if err != nil { - return false, err - } - - // `i%nfh` discards index information for this height so the child only gets - // the part of the index that is relevant for it. - // e.g. get(50) at height=1 for width=8 would be 50%8=2, i.e. the child will - // be asked to get(2) and it will have leaf nodes (because it's height=0) so - // the actual value will be at index=2 of its values array. - return subn.get(ctx, bs, bitWidth, height-1, i%nfh, out) -} - -// Recursively handle a delete through the tree, navigating down in the same -// way as is documented in get(). -func (n *node) delete(ctx context.Context, bs cbor.IpldStore, bitWidth uint, height int, i uint64) (bool, error) { - // at the leaf node where the value is, expand out the values array and - // zero out the value and bit in the bitmap to indicate its deletion - if height == 0 { - if n.getValue(i) == nil { - return false, nil - } - - n.setValue(bitWidth, i, nil) - return true, nil - } - - // see get() documentation on how nfh and subi describes the index at this - // height - nfh := nodesForHeight(bitWidth, height) - subi := i / nfh - - ln := n.getLink(subi) - if ln == nil { - return false, nil - } - - // we're at a non-leaf node, so navigate down to the appropriate child and - // continue - subn, err := ln.load(ctx, bs, bitWidth, height-1) - if err != nil { - return false, err - } - - // see get() documentation for how the i%... calculation trims the index down - // to only that which is applicable for the height below - if deleted, err := subn.delete(ctx, bs, bitWidth, height-1, i%nfh); err != nil { - return false, err - } else if !deleted { - return false, nil - } - - // if the child node we just deleted from now has no children or elements of - // its own, we need to zero it out in this node. This compaction process may - // recursively chain back up through the calling nodes, removing more than - // one node in total for this delete operation (i.e. where an index contains - // the only entry on a particular branch of the tree). - if subn.empty() { - n.setLink(bitWidth, subi, nil) - } else { - ln.dirty = true - } - - return true, nil -} - -// Recursive implementation backing ForEach and ForEachAt. Performs a -// depth-first walk of the tree, beginning at the 'start' index. The 'offset' -// argument helps us locate the lateral position of the current node so we can -// figure out the appropriate 'index', since indexes are not stored with values -// and can only be determined by knowing how far a leaf node is removed from -// the left-most leaf node. 
-func (n *node) forEachAt(ctx context.Context, bs cbor.IpldStore, bitWidth uint, height int, start, offset uint64, cb func(uint64, *cbg.Deferred) error) error { - if height == 0 { - // height=0 means we're at leaf nodes and get to use our callback - for i, v := range n.values { - if v != nil { - ix := offset + uint64(i) - if ix < start { - // if we're here, 'start' is probably somewhere in the - // middle of this node's elements - continue - } - - // use 'offset' to determine the actual index for this element, it - // tells us how distant we are from the left-most leaf node - if err := cb(offset+uint64(i), v); err != nil { - return err - } - } - } - - return nil - } - - subCount := nodesForHeight(bitWidth, height) - for i, ln := range n.links { - if ln == nil { - continue - } - - // 'offs' tells us the index of the left-most element of the subtree defined - // by 'sub' - offs := offset + (uint64(i) * subCount) - nextOffs := offs + subCount - // nextOffs > offs checks for overflow at MaxIndex (where the next offset wraps back - // to 0). - if nextOffs >= offs && start >= nextOffs { - // if we're here, 'start' lets us skip this entire sub-tree - continue - } - - subn, err := ln.load(ctx, bs, bitWidth, height-1) - if err != nil { - return err - } - - // recurse into the child node, providing 'offs' to tell it where it's - // located in the tree - if err := subn.forEachAt(ctx, bs, bitWidth, height-1, start, offs, cb); err != nil { - return err - } - } - return nil -} - -var errNoVals = fmt.Errorf("no values") - -// Recursive implementation of FirstSetIndex that's performed on the left-most -// nodes of the tree down to the leaf. In order to return a correct index, we -// need to accumulate the appropriate number of spaces to the left of the -// left-most that exist at each level, taking into account the number of -// blank leaf-entry positions that exist. -func (n *node) firstSetIndex(ctx context.Context, bs cbor.IpldStore, bitWidth uint, height int) (uint64, error) { - if height == 0 { - for i, v := range n.values { - if v != nil { - // returning 'i' here is a local index (0<=i= 64 { - // The max depth layer may not be full. - return math.MaxUint64 - } - return 1 << heightLogTwo -} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v4/version.json b/vendor/github.com/filecoin-project/go-amt-ipld/v4/version.json deleted file mode 100644 index 2354b51..0000000 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v4/version.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "version": "v4.0.0" -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/CHANGELOG.md b/vendor/github.com/filecoin-project/go-bitfield/CHANGELOG.md deleted file mode 100644 index 0ab8cc7..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/CHANGELOG.md +++ /dev/null @@ -1,25 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - - -## [0.2.2] - 2020-11-06 -### Added -- CHANGELOG.md file -- SECURITY.md with instructions on how to report security issues. 
-- Optimized decode/encode workflow by 42% - -[Unreleased]: https://github.com/filecoin-project/go-bitfield/compare/v0.2.2...HEAD -[0.2.2]: https://github.com/filecoin-project/go-bitfield/compare/v0.2.1...v0.2.2 - diff --git a/vendor/github.com/filecoin-project/go-bitfield/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-bitfield/LICENSE-APACHE deleted file mode 100644 index 14478a3..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/LICENSE-APACHE +++ /dev/null @@ -1,5 +0,0 @@ -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-bitfield/LICENSE-MIT b/vendor/github.com/filecoin-project/go-bitfield/LICENSE-MIT deleted file mode 100644 index 72dc60d..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-bitfield/README.md b/vendor/github.com/filecoin-project/go-bitfield/README.md deleted file mode 100644 index 68c481e..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# go-bitfield - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![CircleCI](https://circleci.com/gh/filecoin-project/go-bitfield.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-bitfield) -[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) - -> Advanced RLE+ implementation - -Features iterator based primitives that scale with number of runs instead of number of bits. 
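To make the run-based API concrete, here is a hedged usage sketch assembled only from exported functions whose signatures appear in the deleted bitfield.go below (NewFromSet, MergeBitFields, Count, All); it assumes the vendored package behaves like the published go-bitfield module.

    package main

    import (
        "fmt"

        bitfield "github.com/filecoin-project/go-bitfield"
    )

    func main() {
        a := bitfield.NewFromSet([]uint64{1, 2, 4}) // 0 1 1 0 1
        b := bitfield.NewFromSet([]uint64{0, 1, 3}) // 1 1 0 1 0

        merged, err := bitfield.MergeBitFields(a, b)
        if err != nil {
            panic(err)
        }

        count, err := merged.Count()
        if err != nil {
            panic(err)
        }
        fmt.Println(count) // 5

        set, err := merged.All(100) // errors if more than 100 bits are set
        if err != nil {
            panic(err)
        }
        fmt.Println(set) // [0 1 2 3 4]
    }

Because the underlying representation is RLE+ runs, the merge and the count scale with the number of runs rather than the number of set bits, which is the property highlighted here.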
- -## License - -The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms: - -- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/filecoin-project/go-bitfield/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](https://github.com/filecoin-project/go-bitfield/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT) diff --git a/vendor/github.com/filecoin-project/go-bitfield/SECURITY.md b/vendor/github.com/filecoin-project/go-bitfield/SECURITY.md deleted file mode 100644 index 0e810df..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/SECURITY.md +++ /dev/null @@ -1,9 +0,0 @@ -# Security Policy - -## Reporting a Vulnerability - -For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) - -## Reporting a non security bug - -For non-critical bugs, please simply file a GitHub issue on this repo. diff --git a/vendor/github.com/filecoin-project/go-bitfield/bitfield.go b/vendor/github.com/filecoin-project/go-bitfield/bitfield.go deleted file mode 100644 index 591889c..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/bitfield.go +++ /dev/null @@ -1,798 +0,0 @@ -package bitfield - -import ( - "errors" - "fmt" - "io" - - rlepluslazy "github.com/filecoin-project/go-bitfield/rle" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" -) - -var ( - ErrBitFieldTooMany = errors.New("to many items in RLE") - ErrNoBitsSet = errors.New("bitfield has no set bits") -) - -// MaxEncodedSize is the maximum encoded size of a bitfield. When expanded into -// a slice of runs, a bitfield of this size should not exceed 2MiB of memory. -// -// This bitfield can fit at least 3072 sparse elements. -const MaxEncodedSize = 32 << 10 - -type BitField struct { - rle rlepluslazy.RLE - - set map[uint64]struct{} - unset map[uint64]struct{} -} - -// New constructs a new BitField. -func New() BitField { - bf, err := NewFromBytes([]byte{}) - if err != nil { - panic(fmt.Sprintf("creating empty rle: %+v", err)) - } - return bf -} - -// NewFromBytes deserializes the encoded bitfield. -func NewFromBytes(rle []byte) (BitField, error) { - bf := BitField{} - rlep, err := rlepluslazy.FromBuf(rle) - if err != nil { - return BitField{}, xerrors.Errorf("could not decode rle+: %w", err) - } - bf.rle = rlep - bf.set = make(map[uint64]struct{}) - bf.unset = make(map[uint64]struct{}) - return bf, nil - -} - -func newWithRle(rle rlepluslazy.RLE) BitField { - return BitField{ - set: make(map[uint64]struct{}), - unset: make(map[uint64]struct{}), - rle: rle, - } -} - -// NewFromSet constructs a bitfield from the given set. -func NewFromSet(setBits []uint64) BitField { - res := BitField{ - set: make(map[uint64]struct{}, len(setBits)), - unset: make(map[uint64]struct{}), - } - for _, b := range setBits { - res.set[b] = struct{}{} - } - return res -} - -// NewFromIter constructs a BitField from the RunIterator. -func NewFromIter(r rlepluslazy.RunIterator) (BitField, error) { - buf, err := rlepluslazy.EncodeRuns(r, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return BitField{}, err - } - - return newWithRle(rle), nil -} - -// MergeBitFields returns the union of the two BitFields. 
-// -// For example, given two BitFields: -// -// 0 1 1 0 1 -// 1 1 0 1 0 -// -// MergeBitFields would return -// -// 1 1 1 1 1 -// -// This operation's runtime is O(number of runs). -func MergeBitFields(a, b BitField) (BitField, error) { - ra, err := a.RunIterator() - if err != nil { - return BitField{}, err - } - - rb, err := b.RunIterator() - if err != nil { - return BitField{}, err - } - - merge, err := rlepluslazy.Or(ra, rb) - if err != nil { - return BitField{}, err - } - - mergebytes, err := rlepluslazy.EncodeRuns(merge, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(mergebytes) - if err != nil { - return BitField{}, err - } - - return newWithRle(rle), nil -} - -// MultiMerge returns the unions of all the passed BitFields. -// -// Calling MultiMerge is identical to calling MergeBitFields repeatedly, just -// more efficient when merging more than two BitFields. -// -// This operation's runtime is O(number of runs * number of bitfields). -func MultiMerge(bfs ...BitField) (BitField, error) { - if len(bfs) == 0 { - return NewFromSet(nil), nil - } - - iters := make([]rlepluslazy.RunIterator, 0, len(bfs)) - for _, bf := range bfs { - iter, err := bf.RunIterator() - if err != nil { - return BitField{}, err - } - iters = append(iters, iter) - } - - iter, err := rlepluslazy.Union(iters...) - if err != nil { - return BitField{}, err - } - return NewFromIter(iter) -} - -// CutBitField cuts bitfield B from bitfield A. For every bit in B cut from A, -// subsequent entries in A are shifted down by one. -// -// For example: -// -// a: 0 1 0 1 1 1 -// b: 0 1 1 0 0 0 -// -// c: 0 1 1 1 // cut -// c: 0 1 1 1 // remove holes -func CutBitField(a, b BitField) (BitField, error) { - aiter, err := a.RunIterator() - if err != nil { - return BitField{}, err - } - - biter, err := b.RunIterator() - if err != nil { - return BitField{}, err - } - - var ( - run, cutRun rlepluslazy.Run - output []rlepluslazy.Run - ) - for { - if !run.Valid() { - if !aiter.HasNext() { - // All done. - break - } - - run, err = aiter.NextRun() - if err != nil { - return BitField{}, err - } - } - - if !cutRun.Valid() && biter.HasNext() { - cutRun, err = biter.NextRun() - if err != nil { - return BitField{}, err - } - } - - var newRun rlepluslazy.Run - if !cutRun.Valid() { - newRun = run // keep remaining runs - run.Len = 0 - } else if cutRun.Len >= run.Len { - if !cutRun.Val { - newRun = run - } - cutRun.Len -= run.Len - run.Len = 0 - } else { - if !cutRun.Val { - newRun = rlepluslazy.Run{ - Val: run.Val, - Len: cutRun.Len, - } - } - run.Len -= cutRun.Len - cutRun.Len = 0 - } - - if newRun.Valid() { - if len(output) > 0 && output[len(output)-1].Val == newRun.Val { - // Join adjacent runs of 1s. We may cut in the middle of - // a run. 
- output[len(output)-1].Len += newRun.Len - } else { - output = append(output, newRun) - } - } - } - - buf, err := rlepluslazy.EncodeRuns(&rlepluslazy.RunSliceIterator{Runs: output}, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return BitField{}, err - } - - return BitField{rle: rle}, nil -} - -func (bf BitField) RunIterator() (rlepluslazy.RunIterator, error) { - iter, err := bf.rle.RunIterator() - if err != nil { - return nil, err - } - if len(bf.set) > 0 { - slc := make([]uint64, 0, len(bf.set)) - for b := range bf.set { - slc = append(slc, b) - } - set, err := rlepluslazy.RunsFromSlice(slc) - if err != nil { - return nil, err - } - newIter, err := rlepluslazy.Or(iter, set) - if err != nil { - return nil, err - } - iter = newIter - } - if len(bf.unset) > 0 { - slc := make([]uint64, 0, len(bf.unset)) - for b := range bf.unset { - slc = append(slc, b) - } - - unset, err := rlepluslazy.RunsFromSlice(slc) - if err != nil { - return nil, err - } - newIter, err := rlepluslazy.Subtract(iter, unset) - if err != nil { - return nil, err - } - iter = newIter - } - return iter, nil -} - -// Set sets the given bit in the BitField -// -// This operation's runtime is O(1) up-front. However, it adds an O(bits -// explicitly set) cost to all other operations. -func (bf BitField) Set(bit uint64) { - delete(bf.unset, bit) - bf.set[bit] = struct{}{} -} - -// Unset unsets given bit in the BitField -// -// This operation's runtime is O(1). However, it adds an O(bits -// explicitly unset) cost to all other operations. -func (bf BitField) Unset(bit uint64) { - delete(bf.set, bit) - bf.unset[bit] = struct{}{} -} - -// Count counts the non-zero bits in the bitfield. -// -// For example, given: -// -// 1 0 1 1 -// -// Count() will return 3. -// -// This operation's runtime is O(number of runs). -func (bf BitField) Count() (uint64, error) { - s, err := bf.RunIterator() - if err != nil { - return 0, err - } - return rlepluslazy.Count(s) -} - -// All returns a slice of set bits in sorted order. -// -// For example, given: -// -// 1 0 0 1 -// -// All will return: -// -// []uint64{0, 3} -// -// This operation's runtime is O(number of bits). -func (bf BitField) All(max uint64) ([]uint64, error) { - c, err := bf.Count() - if err != nil { - return nil, xerrors.Errorf("count errror: %w", err) - } - if c > max { - return nil, xerrors.Errorf("expected %d, got %d: %w", max, c, ErrBitFieldTooMany) - } - - runs, err := bf.RunIterator() - if err != nil { - return nil, err - } - - res, err := rlepluslazy.SliceFromRuns(runs) - if err != nil { - return nil, err - } - - return res, nil -} - -// AllMap returns a map of all set bits. -// -// For example, given: -// -// 1 0 0 1 -// -// All will return: -// -// map[uint64]bool{0: true, 3: true} -// -// This operation's runtime is O(number of bits). 
-func (bf BitField) AllMap(max uint64) (map[uint64]bool, error) { - c, err := bf.Count() - if err != nil { - return nil, xerrors.Errorf("count errror: %w", err) - } - if c > max { - return nil, xerrors.Errorf("expected %d, got %d: %w", max, c, ErrBitFieldTooMany) - } - - runs, err := bf.RunIterator() - if err != nil { - return nil, err - } - - res, err := rlepluslazy.SliceFromRuns(runs) - if err != nil { - return nil, err - } - - out := make(map[uint64]bool, len(res)) - for _, i := range res { - out[i] = true - } - return out, nil -} - -func (bf BitField) MarshalCBOR(w io.Writer) error { - var rle []byte - if len(bf.set) == 0 && len(bf.unset) == 0 { - // If unmodified, avoid re-encoding. - rle = bf.rle.Bytes() - } else { - - s, err := bf.RunIterator() - if err != nil { - return err - } - - rle, err = rlepluslazy.EncodeRuns(s, []byte{}) - if err != nil { - return err - } - } - - if len(rle) > MaxEncodedSize { - return xerrors.Errorf("encoded bitfield was too large (%d)", len(rle)) - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(rle)))); err != nil { - return err - } - if _, err := w.Write(rle); err != nil { - return xerrors.Errorf("writing rle: %w", err) - } - return nil -} - -func (bf *BitField) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if extra > MaxEncodedSize { - return fmt.Errorf("array too large") - } - - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - buf := make([]byte, extra) - if _, err := io.ReadFull(br, buf); err != nil { - return err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return xerrors.Errorf("could not decode rle+: %w", err) - } - bf.rle = rle - bf.set = make(map[uint64]struct{}) - bf.unset = make(map[uint64]struct{}) - - return nil -} - -func (bf BitField) MarshalJSON() ([]byte, error) { - - c, err := bf.Copy() - if err != nil { - return nil, err - } - - return c.rle.MarshalJSON() -} - -func (bf *BitField) UnmarshalJSON(b []byte) error { - - err := bf.rle.UnmarshalJSON(b) - if err != nil { - return err - } - bf.set = make(map[uint64]struct{}) - bf.unset = make(map[uint64]struct{}) - return nil -} - -// ForEach iterates over each set bit. -// -// This operation's runtime is O(bits set). -func (bf BitField) ForEach(f func(uint64) error) error { - iter, err := bf.RunIterator() - if err != nil { - return err - } - - var i uint64 - for iter.HasNext() { - r, err := iter.NextRun() - if err != nil { - return err - } - - if r.Val { - for j := uint64(0); j < r.Len; j++ { - if err := f(i); err != nil { - return err - } - i++ - } - } else { - i += r.Len - } - } - return nil -} - -// IsSet returns true if the given bit is set. -// -// This operation's runtime is O(number of runs). -func (bf BitField) IsSet(x uint64) (bool, error) { - if _, ok := bf.set[x]; ok { - return true, nil - } - - if _, ok := bf.unset[x]; ok { - return false, nil - } - - iter, err := bf.rle.RunIterator() - if err != nil { - return false, err - } - - return rlepluslazy.IsSet(iter, x) -} - -// First returns the index of the first set bit. This function returns -// ErrNoBitsSet when no bits have been set. -// -// This operation's runtime is O(1). 
-func (bf BitField) First() (uint64, error) { - iter, err := bf.RunIterator() - if err != nil { - return 0, err - } - - var i uint64 - for iter.HasNext() { - r, err := iter.NextRun() - if err != nil { - return 0, err - } - - if r.Val { - return i, nil - } else { - i += r.Len - } - } - return 0, ErrNoBitsSet -} - -// Last returns the index of the last set bit. This function returns -// ErrNoBitsSet when no bits have been set. -// -// This operation's runtime is O(n). -func (bf BitField) Last() (uint64, error) { - iter, err := bf.RunIterator() - if err != nil { - return 0, err - } - - var ( - at, maxplusone uint64 - ) - for iter.HasNext() { - run, err := iter.NextRun() - if err != nil { - return 0, err - } - - at += run.Len - - if run.Val { - maxplusone = at - } - } - if maxplusone == 0 { - return 0, ErrNoBitsSet - } - return maxplusone - 1, nil -} - -// IsEmpty returns true if the bitset is empty. -// -// This operation's runtime is O(1). -func (bf BitField) IsEmpty() (bool, error) { - _, err := bf.First() - switch err { - case ErrNoBitsSet: - return true, nil - case nil: - return false, nil - default: - return false, err - } -} - -// Slice treats the BitField as an ordered set of set bits, then slices this set. -// -// That is, it skips start set bits, then returns the next count set bits. -// -// For example, given: -// -// 1 0 1 1 0 1 1 -// -// bf.Slice(2, 2) would return: -// -// 0 0 0 1 0 1 0 -// -// This operation's runtime is O(number of runs). -func (bf BitField) Slice(start, count uint64) (BitField, error) { - iter, err := bf.RunIterator() - if err != nil { - return BitField{}, err - } - - valsUntilStart := start - - var sliceRuns []rlepluslazy.Run - var i, outcount uint64 - for iter.HasNext() && valsUntilStart > 0 { - r, err := iter.NextRun() - if err != nil { - return BitField{}, err - } - - if r.Val { - if r.Len <= valsUntilStart { - valsUntilStart -= r.Len - i += r.Len - } else { - i += valsUntilStart - - rem := r.Len - valsUntilStart - if rem > count { - rem = count - } - - sliceRuns = append(sliceRuns, - rlepluslazy.Run{Val: false, Len: i}, - rlepluslazy.Run{Val: true, Len: rem}, - ) - outcount += rem - valsUntilStart = 0 - } - } else { - i += r.Len - } - } - - for iter.HasNext() && outcount < count { - r, err := iter.NextRun() - if err != nil { - return BitField{}, err - } - - if r.Val { - if r.Len <= count-outcount { - sliceRuns = append(sliceRuns, r) - outcount += r.Len - } else { - sliceRuns = append(sliceRuns, rlepluslazy.Run{Val: true, Len: count - outcount}) - outcount = count - } - } else { - if len(sliceRuns) == 0 { - r.Len += i - } - sliceRuns = append(sliceRuns, r) - } - } - if outcount < count { - return BitField{}, fmt.Errorf("not enough bits set in field to satisfy slice count") - } - - buf, err := rlepluslazy.EncodeRuns(&rlepluslazy.RunSliceIterator{Runs: sliceRuns}, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return BitField{}, err - } - - return BitField{rle: rle}, nil -} - -// IntersectBitField returns the intersection of the two BitFields. -// -// For example, given two BitFields: -// -// 0 1 1 0 1 -// 1 1 0 1 0 -// -// IntersectBitField would return -// -// 0 1 0 0 0 -// -// This operation's runtime is O(number of runs). 
-func IntersectBitField(a, b BitField) (BitField, error) { - ar, err := a.RunIterator() - if err != nil { - return BitField{}, err - } - - br, err := b.RunIterator() - if err != nil { - return BitField{}, err - } - - andIter, err := rlepluslazy.And(ar, br) - if err != nil { - return BitField{}, err - } - - buf, err := rlepluslazy.EncodeRuns(andIter, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return BitField{}, err - } - - return newWithRle(rle), nil -} - -// SubtractBitField returns the difference between the two BitFields. That is, -// it returns a bitfield of all bits set in a but not set in b. -// -// For example, given two BitFields: -// -// 0 1 1 0 1 // a -// 1 1 0 1 0 // b -// -// SubtractBitFields would return -// -// 0 0 1 0 1 -// -// This operation's runtime is O(number of runs). -func SubtractBitField(a, b BitField) (BitField, error) { - ar, err := a.RunIterator() - if err != nil { - return BitField{}, err - } - - br, err := b.RunIterator() - if err != nil { - return BitField{}, err - } - - andIter, err := rlepluslazy.Subtract(ar, br) - if err != nil { - return BitField{}, err - } - - buf, err := rlepluslazy.EncodeRuns(andIter, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return BitField{}, err - } - - return newWithRle(rle), nil -} - -// Copy flushes the bitfield and returns a copy that can be mutated -// without changing the original values -func (bf BitField) Copy() (BitField, error) { - r, err := bf.RunIterator() - if err != nil { - return BitField{}, err - } - - buf, err := rlepluslazy.EncodeRuns(r, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return BitField{}, err - } - - return newWithRle(rle), nil -} - -// BitIterator iterates over the bits in the bitmap -func (bf BitField) BitIterator() (rlepluslazy.BitIterator, error) { - r, err := bf.RunIterator() - if err != nil { - return nil, err - } - return rlepluslazy.BitsFromRuns(r) -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/codecov.yml b/vendor/github.com/filecoin-project/go-bitfield/codecov.yml deleted file mode 100644 index 01da6d5..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -comment: off -github_checks: - annotations: false diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go deleted file mode 100644 index cef81fd..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go +++ /dev/null @@ -1,195 +0,0 @@ -package rlepluslazy - -import ( - "errors" - "sort" -) - -var ErrEndOfIterator = errors.New("end of iterator") - -type it2b struct { - source RunIterator - curIdx uint64 - - run Run -} - -func (it *it2b) HasNext() bool { - return it.run.Valid() -} - -func (it *it2b) Next() (uint64, error) { - it.run.Len-- - res := it.curIdx - it.curIdx++ - return res, it.prep() -} - -func (it *it2b) Nth(n uint64) (uint64, error) { - skip := n + 1 - for it.run.Len < skip { - if !it.HasNext() { - return 0, ErrEndOfIterator - } - skip -= it.run.Len - it.curIdx += it.run.Len - it.run.Len = 0 - if err := it.prep(); err != nil { - return 0, err - } - } - it.run.Len -= skip - it.curIdx += skip - res := it.curIdx - 1 - return res, it.prep() -} - -func (it *it2b) prep() error { - for !it.run.Valid() && it.source.HasNext() { - var err error - it.run, err = it.source.NextRun() 
- if err != nil { - return err - } - - if !it.run.Val { - it.curIdx += it.run.Len - it.run.Len = 0 - } - } - return nil -} - -func BitsFromRuns(source RunIterator) (BitIterator, error) { - it := &it2b{source: source} - if err := it.prep(); err != nil { - return nil, err - } - return it, nil -} - -type sliceIt struct { - s []uint64 -} - -func (it sliceIt) HasNext() bool { - return len(it.s) != 0 -} - -func (it *sliceIt) Next() (uint64, error) { - if len(it.s) == 0 { - return 0, ErrEndOfIterator - } - res := it.s[0] - it.s = it.s[1:] - return res, nil -} - -func (it *sliceIt) Nth(n uint64) (uint64, error) { - if uint64(len(it.s)) <= n { - it.s = nil - return 0, ErrEndOfIterator - } - res := it.s[n] - it.s = it.s[n+1:] - return res, nil -} - -func BitsFromSlice(slice []uint64) BitIterator { - sort.Slice(slice, func(i, j int) bool { return slice[i] < slice[j] }) - return &sliceIt{slice} -} - -type it2r struct { - source BitIterator - - runIdx uint64 - run [2]Run -} - -func (it *it2r) HasNext() bool { - return it.run[0].Valid() -} - -func (it *it2r) NextRun() (Run, error) { - res := it.run[0] - it.runIdx = it.runIdx + res.Len - it.run[0], it.run[1] = it.run[1], Run{} - return res, it.prep() -} - -func (it *it2r) prep() error { - if !it.HasNext() { - return nil - } - if !it.run[0].Val { - it.run[1].Val = true - it.run[1].Len = 1 - return nil - } - - for it.source.HasNext() && !it.run[1].Valid() { - nB, err := it.source.Next() - if err != nil { - return err - } - - //fmt.Printf("runIdx: %d, run[0].Len: %d, nB: %d\n", it.runIdx, it.run[0].Len, nB) - if it.runIdx+it.run[0].Len == nB { - it.run[0].Len++ - } else { - it.run[1].Len = nB - it.runIdx - it.run[0].Len - it.run[1].Val = false - } - } - return nil -} - -func (it *it2r) init() error { - if it.source.HasNext() { - nB, err := it.source.Next() - if err != nil { - return err - } - it.run[0].Len = nB - it.run[0].Val = false - it.run[1].Len = 1 - it.run[1].Val = true - } - - if !it.run[0].Valid() { - it.run[0], it.run[1] = it.run[1], Run{} - return it.prep() - } - return nil -} - -func SliceFromRuns(source RunIterator) ([]uint64, error) { - rit, err := BitsFromRuns(source) - if err != nil { - return nil, err - } - - res := make([]uint64, 0) - for rit.HasNext() { - bit, err := rit.Next() - if err != nil { - return nil, err - } - res = append(res, bit) - } - return res, nil -} - -func RunsFromBits(source BitIterator) (RunIterator, error) { - it := &it2r{source: source} - - if err := it.init(); err != nil { - return nil, err - } - return it, nil -} - -func RunsFromSlice(slice []uint64) (RunIterator, error) { - return RunsFromBits(BitsFromSlice(slice)) -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go deleted file mode 100644 index 4331e0a..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go +++ /dev/null @@ -1,134 +0,0 @@ -package rlepluslazy - -type rbitvec struct { - index int - - bits uint16 - bitCap byte - - vec []byte -} - -func readBitvec(vec []byte) *rbitvec { - bv := &rbitvec{ - vec: vec, - index: 1, - bitCap: 8, - } - if len(vec) > 0 { - bv.bits = uint16(bv.vec[0]) - } - return bv -} - -func (bv *rbitvec) GetByte() byte { - // Advancing byte by byte is simpler than advancing an odd number of - // bits because we _always_ load the next byte. 
- res := byte(bv.bits) - bv.bits >>= 8 - - if bv.index < len(bv.vec) { // if vector allows - // add bits onto the end of temporary storage - bv.bits |= uint16(bv.vec[bv.index]) << (bv.bitCap - 8) - } - - bv.index += 1 - return res -} - -func (bv *rbitvec) Peek6() byte { - return byte(bv.bits) & 0x3f -} - -func (bv *rbitvec) GetBit() bool { - // The specialized GetBit is easier for the compiler to optimize, for some reason. - - res := (bv.bits&0x1 != 0) - bv.bits >>= 1 - bv.bitCap -= 1 - - if bv.index < len(bv.vec) { // if vector allows - // add bits onto the end of temporary storage - bv.bits |= uint16(bv.vec[bv.index]) << bv.bitCap - } - - inc := byte(0) - if bv.bitCap < 8 { - inc = 1 - } - bv.index += int(inc) // increase index if we need more bits - bv.bitCap += inc * 8 // increase bitCap by 8 - return res -} - -func (bv *rbitvec) Get(count byte) byte { - res := byte(bv.bits) & (1<<count - 1) // select count bits - bv.bits >>= count // remove those bits from storage - bv.bitCap -= count // decrease nuber of stored bits - - if bv.index < len(bv.vec) { // if vector allows - // add bits onto the end of temporary storage - bv.bits |= uint16(bv.vec[bv.index]) << bv.bitCap - } - - // This is equivalent to - // if bv.bitCap < 8 { - // bv.index++ - // bv.bitCap = bv.bitCap + 8 - // } - // but implemented so go doesn't generate branches. - inc := byte(0) - if bv.bitCap < 8 { - inc = 1 - } - bv.index += int(inc) // increase index if we need more bits - bv.bitCap += inc * 8 // increase bitCap by 8 - - return res -} - -func writeBitvec(buf []byte) *wbitvec { - // reslice to 0 length for consistent input but to keep capacity - return &wbitvec{buf: buf[:0]} -} - -type wbitvec struct { - buf []byte // buffer we will be saving to - - bits uint16 // temporary storage for bits - bitCap byte // number of bits stored in temporary storage -} - -// Returns the resulting bitvector, with any trailing zero bytes removed. -func (bv *wbitvec) Out() []byte { - if bv.bitCap != 0 { - // if there are some bits in temporary storage we need to save them - bv.buf = append(bv.buf, byte(bv.bits)) - } - if bv.bitCap > 8 { - // if we store some needed bits in second byte, save them also - bv.buf = append(bv.buf, byte(bv.bits>>8)) - } - bv.bitCap = 0 - bv.bits = 0 - - // Minimally encode.
- for len(bv.buf) > 0 && bv.buf[len(bv.buf)-1] == 0 { - bv.buf = bv.buf[:len(bv.buf)-1] - } - - return bv.buf -} - -func (bv *wbitvec) Put(val byte, count byte) { - // put val into its place in bv.bits - bv.bits = bv.bits | uint16(val)<<bv.bitCap - - // increase bitCap by the number of bits - bv.bitCap = bv.bitCap + count - - for bv.bitCap >= 8 { - bv.buf = append(bv.buf, byte(bv.bits)) - bv.bitCap -= 8 - bv.bits >>= 8 - } -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/interface.go b/vendor/github.com/filecoin-project/go-bitfield/rle/interface.go deleted file mode 100644 index f76de1d..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/interface.go +++ /dev/null @@ -1,25 +0,0 @@ -package rlepluslazy - -type Run struct { - Val bool - Len uint64 -} - -func (r Run) Valid() bool { - return r.Len != 0 -} - -type RunIterator interface { - NextRun() (Run, error) - HasNext() bool -} - -type RunIterable interface { - RunIterator() (RunIterator, error) -} - -type BitIterator interface { - Next() (uint64, error) - Nth(n uint64) (uint64, error) - HasNext() bool -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose.go b/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose.go deleted file mode 100644 index 1fd85bb..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose.go +++ /dev/null @@ -1,65 +0,0 @@ -package rlepluslazy - -func JoinClose(it RunIterator, closeness uint64) (RunIterator, error) { - jc := &jcIt{ - it: &peekIter{it: it}, - closeness: closeness, - } - if err := jc.prep(); err != nil { - return nil, err - } - return jc, nil -} - -type jcIt struct { - it *peekIter - run Run - - closeness uint64 -} - -func (jc *jcIt) prep() error { - if !jc.it.HasNext() { - jc.run = Run{} - return nil - } - - var err error - jc.run, err = jc.it.NextRun() - if err != nil { - return err - } - - if jc.run.Val { - for { - if jc.it.HasNext() { - run, err := jc.it.NextRun() - if err != nil { - return err - } - if run.Len <= jc.closeness || run.Val { - jc.run.Len += run.Len - continue - } else { - jc.it.put(run, err) - break - } - } - break - } - } - return nil -} - -func (jc *jcIt) HasNext() bool { - return jc.run.Valid() -} - -func (jc *jcIt) NextRun() (Run, error) { - out := jc.run - if err := jc.prep(); err != nil { - return Run{}, err - } - return out, nil - -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/merge.go b/vendor/github.com/filecoin-project/go-bitfield/rle/merge.go deleted file mode 100644 index 9c18f4b..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/merge.go +++ /dev/null @@ -1,31 +0,0 @@ -package rlepluslazy - -// Union returns the union of the passed iterators. Internally, this calls Or on -// the passed iterators, combining them with a binary tree of Ors.
-func Union(iters ...RunIterator) (RunIterator, error) { - if len(iters) == 0 { - return RunsFromSlice(nil) - } - - for len(iters) > 1 { - var next []RunIterator - - for i := 0; i < len(iters); i += 2 { - if i+1 >= len(iters) { - next = append(next, iters[i]) - continue - } - - orit, err := Or(iters[i], iters[i+1]) - if err != nil { - return nil, err - } - - next = append(next, orit) - } - - iters = next - } - - return iters[0], nil -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go deleted file mode 100644 index 267cd1d..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go +++ /dev/null @@ -1,133 +0,0 @@ -package rlepluslazy - -import ( - "encoding/json" - "errors" - "fmt" - - "golang.org/x/xerrors" -) - -const Version = 0 - -var ( - ErrWrongVersion = errors.New("invalid RLE+ version") - ErrDecode = fmt.Errorf("invalid encoding for RLE+ version %d", Version) -) - -type RLE struct { - buf []byte - validated bool -} - -func FromBuf(buf []byte) (RLE, error) { - rle := RLE{buf: buf} - - if len(buf) > 0 && buf[0]&3 != Version { - return RLE{}, xerrors.Errorf("could not create RLE+ for a buffer: %w", ErrWrongVersion) - } - - return rle, nil -} - -// Bytes returns the encoded RLE. -// -// Do not modify. -func (rle *RLE) Bytes() []byte { - return rle.buf -} - -// Validate is a separate function to show up on profile for repeated decode evaluation -func (rle *RLE) Validate() error { - if !rle.validated { - return ValidateRLE(rle.buf) - } - return nil -} - -func (rle *RLE) RunIterator() (RunIterator, error) { - err := rle.Validate() - if err != nil { - return nil, xerrors.Errorf("validation failed: %w", err) - } - - source, err := DecodeRLE(rle.buf) - if err != nil { - return nil, xerrors.Errorf("decoding RLE: %w", err) - } - - return source, nil -} - -func (rle *RLE) Count() (uint64, error) { - it, err := rle.RunIterator() - if err != nil { - return 0, err - } - return Count(it) -} - -// Encoded as an array of run-lengths, always starting with zeroes (absent values) -// E.g.: The set {0, 1, 2, 8, 9} is the bitfield 1110000011, and would be marshalled as [0, 3, 5, 2] -func (rle *RLE) MarshalJSON() ([]byte, error) { - r, err := rle.RunIterator() - if err != nil { - return nil, err - } - - var ret []uint64 - if r.HasNext() { - first, err := r.NextRun() - if err != nil { - return nil, err - } - if first.Val { - ret = append(ret, 0) - } - ret = append(ret, first.Len) - - for r.HasNext() { - next, err := r.NextRun() - if err != nil { - return nil, err - } - - ret = append(ret, next.Len) - } - } else { - ret = []uint64{0} - } - - return json.Marshal(ret) -} - -func (rle *RLE) UnmarshalJSON(b []byte) error { - var buf []uint64 - - if err := json.Unmarshal(b, &buf); err != nil { - return err - } - - runs := []Run{} - val := false - for i, v := range buf { - if v == 0 { - if i != 0 { - return xerrors.New("Cannot have a zero-length run except at start") - } - } else { - runs = append(runs, Run{ - Val: val, - Len: v, - }) - } - val = !val - } - enc, err := EncodeRuns(&RunSliceIterator{Runs: runs}, []byte{}) - if err != nil { - return xerrors.Errorf("encoding runs: %w", err) - } - rle.buf = enc - - return nil -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go deleted file mode 100644 index 802f16c..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go +++ /dev/null 
@@ -1,188 +0,0 @@ -package rlepluslazy - -import ( - "math" - "math/bits" - - "golang.org/x/xerrors" -) - -type decodeInfo struct { - length byte // length of the run - i byte // i+1 is number of repeats of above run lengths - n byte // number of bits to read - varint bool // varint signifies that futher bits need to be processed as a varint -} - -func init() { - buildDecodeTable() -} - -// This is a LUT for all possible 6 bit codes and what they decode into -// possible combinations are: -// 0bxxxxxx1 - 1 run of 1 -// 0bxxxxx11 - 2 runs of 1 -// up to 0b111111 - 6 runs of 1 -// 0bAAAA10 - 1 run of length 0bAAAA -// 0bxxxx00 - varint run, the decode value not defined in LUT -var decodeTable = [1 << 6]decodeInfo{} - -func buildDecodeTable() { - for idx := uint8(0); int(idx) < len(decodeTable); idx++ { - switch { - case bits.TrailingZeros8(^idx) > 0: - i := uint8(bits.TrailingZeros8(^idx)) - decodeTable[idx] = decodeInfo{ - length: 1, - i: i - 1, - n: i, - } - case idx&0b11 == 0b10: - // 01 + 4bit : run of 0 to 15 - decodeTable[idx] = decodeInfo{ - length: byte(idx >> 2), - i: 0, - n: 6, - } - case idx&0b11 == 0b00: - decodeTable[idx] = decodeInfo{ - n: 2, - varint: true, - } - } - } -} - -func DecodeRLE(buf []byte) (RunIterator, error) { - if len(buf) > 0 && buf[len(buf)-1] == 0 { - // trailing zeros bytes not allowed. - return nil, xerrors.Errorf("not minimally encoded: %w", ErrDecode) - } - - bv := readBitvec(buf) - - ver := bv.Get(2) // Read version - if ver != Version { - return nil, ErrWrongVersion - } - - it := &rleIterator{bv: bv} - - // next run is previous in relation to prep - // so we invert the value - it.lastVal = bv.Get(1) != 1 - if err := it.prep(); err != nil { - return nil, err - } - return it, nil -} - -// ValidateRLE validates the RLE+ in buf does not overflow Uint64 -func ValidateRLE(buf []byte) error { - if len(buf) > 0 && buf[len(buf)-1] == 0 { - // trailing zeros bytes not allowed. - return xerrors.Errorf("not minimally encoded: %w", ErrDecode) - } - bv := readBitvec(buf) - - ver := bv.Get(2) // Read version - if ver != Version { - return ErrWrongVersion - } - - // this is run value bit, as we are validating lengths we don't care about it - bv.Get(1) - - totalLen := uint64(0) - for { - idx := bv.Peek6() - decode := decodeTable[idx] - _ = bv.Get(decode.n) - - var runLen uint64 - if decode.varint { - x, err := decodeBFVarint(bv) - if err != nil { - return err - } - runLen = x - } else { - runLen = uint64(decode.i+1) * uint64(decode.length) - } - - if math.MaxUint64-runLen < totalLen { - return xerrors.Errorf("RLE+ overflow") - } - totalLen += runLen - if runLen == 0 { - break - } - } - return nil -} - -type rleIterator struct { - bv *rbitvec - length uint64 - - lastVal bool - i uint8 -} - -func (it *rleIterator) HasNext() bool { - return it.length != 0 -} - -func (it *rleIterator) NextRun() (r Run, err error) { - ret := Run{Len: it.length, Val: !it.lastVal} - it.lastVal = ret.Val - - if it.i == 0 { - err = it.prep() - } else { - it.i-- - } - return ret, err -} - -func decodeBFVarint(bv *rbitvec) (uint64, error) { - // Modified from the go standard library. Copyright the Go Authors and - // released under the BSD License. 
- var x uint64 - var s uint - for i := 0; ; i++ { - if i == 10 { - return 0, xerrors.Errorf("run too long: %w", ErrDecode) - } - b := bv.GetByte() - if b < 0x80 { - if i > 9 || i == 9 && b > 1 { - return 0, xerrors.Errorf("run too long: %w", ErrDecode) - } else if b == 0 && s > 0 { - return 0, xerrors.Errorf("invalid run: %w", ErrDecode) - } - x |= uint64(b) << s - break - } - x |= uint64(b&0x7f) << s - s += 7 - } - return x, nil -} - -func (it *rleIterator) prep() error { - idx := it.bv.Peek6() - decode := decodeTable[idx] - _ = it.bv.Get(decode.n) - - it.i = decode.i - it.length = uint64(decode.length) - if decode.varint { - x, err := decodeBFVarint(it.bv) - if err != nil { - return err - } - it.length = x - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_writer.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_writer.go deleted file mode 100644 index cecbb27..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_writer.go +++ /dev/null @@ -1,64 +0,0 @@ -package rlepluslazy - -import ( - "encoding/binary" - "errors" -) - -var ErrSameValRuns = errors.New("2 consecutive runs with the same value") - -func EncodeRuns(rit RunIterator, buf []byte) ([]byte, error) { - bv := writeBitvec(buf) - bv.Put(0, 2) - - first := true - varBuf := make([]byte, binary.MaxVarintLen64) - prev := false - - for rit.HasNext() { - run, err := rit.NextRun() - if err != nil { - return nil, err - } - if !rit.HasNext() && !run.Val { - break - } - - if first { - if run.Val { - bv.Put(1, 1) - } else { - bv.Put(0, 1) - } - prev = run.Val - first = false - } else { - if prev == run.Val { - return nil, ErrSameValRuns - } - prev = run.Val - } - - switch { - case run.Len == 1: - bv.Put(1, 1) - case run.Len < 16: - bv.Put(2, 2) - bv.Put(byte(run.Len), 4) - case run.Len >= 16: - bv.Put(0, 2) - numBytes := binary.PutUvarint(varBuf, run.Len) - for i := 0; i < numBytes; i++ { - bv.Put(varBuf[i], 8) - } - } - - } - - if first { - bv.Put(0, 1) - } - - return bv.Out(), nil - -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go b/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go deleted file mode 100644 index e667bc2..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go +++ /dev/null @@ -1,347 +0,0 @@ -package rlepluslazy - -import ( - "fmt" - "math" - - "golang.org/x/xerrors" -) - -func Or(a, b RunIterator) (RunIterator, error) { - if !a.HasNext() { - return b, nil - } else if !b.HasNext() { - return a, nil - } - it := addIt{a: a, b: b} - return &it, it.prep() -} - -type addIt struct { - a RunIterator - b RunIterator - - next Run - - arun Run - brun Run -} - -func (it *addIt) prep() error { - var err error - - fetch := func() error { - if !it.arun.Valid() && it.a.HasNext() { - it.arun, err = it.a.NextRun() - if err != nil { - return err - } - } - - if !it.brun.Valid() && it.b.HasNext() { - it.brun, err = it.b.NextRun() - if err != nil { - return err - } - } - return nil - } - - if err := fetch(); err != nil { - return err - } - - // one is not valid - if !it.arun.Valid() { - it.next = it.brun - it.brun.Len = 0 - return nil - } - - if !it.brun.Valid() { - it.next = it.arun - it.arun.Len = 0 - return nil - } - - if !it.arun.Val && !it.brun.Val { - min := it.arun.Len - if it.brun.Len < min { - min = it.brun.Len - } - it.next = Run{Val: it.arun.Val, Len: min} - it.arun.Len -= it.next.Len - it.brun.Len -= it.next.Len - - if err := fetch(); err != nil { - return err - } - trailingRun := func(r1, r2 Run) bool { - 
return !r1.Valid() && r2.Val == it.next.Val - } - if trailingRun(it.arun, it.brun) || trailingRun(it.brun, it.arun) { - it.next.Len += it.arun.Len - it.next.Len += it.brun.Len - it.arun.Len = 0 - it.brun.Len = 0 - } - - return nil - } - - it.next = Run{Val: true} - // different vals, 'true' wins - for (it.arun.Val && it.arun.Valid()) || (it.brun.Val && it.brun.Valid()) { - min := it.arun.Len - if it.brun.Len < min && it.brun.Valid() || !it.arun.Valid() { - min = it.brun.Len - } - it.next.Len += min - if it.arun.Valid() { - it.arun.Len -= min - } - if it.brun.Valid() { - it.brun.Len -= min - } - if err := fetch(); err != nil { - return err - } - } - - return nil -} - -func (it *addIt) HasNext() bool { - return it.next.Valid() -} - -func (it *addIt) NextRun() (Run, error) { - next := it.next - return next, it.prep() -} - -func Count(ri RunIterator) (uint64, error) { - var length uint64 - var count uint64 - - for ri.HasNext() { - r, err := ri.NextRun() - if err != nil { - return 0, err - } - - if math.MaxUint64-r.Len < length { - return 0, xerrors.New("RLE+ overflows") - } - length += r.Len - - if r.Val { - count += r.Len - } - } - return count, nil -} - -func IsSet(ri RunIterator, x uint64) (bool, error) { - var i uint64 - for ri.HasNext() { - r, err := ri.NextRun() - if err != nil { - return false, err - } - - if i+r.Len > x { - return r.Val, nil - } - - i += r.Len - } - return false, nil -} - -func min(a, b uint64) uint64 { - if a < b { - return a - } - return b -} - -type andIter struct { - a, b RunIterator - ar, br Run -} - -func (ai *andIter) HasNext() bool { - return (ai.ar.Valid() || ai.a.HasNext()) && (ai.br.Valid() || ai.b.HasNext()) -} - -func (ai *andIter) NextRun() (run Run, err error) { - for { - // Ensure we have two valid runs. - if !ai.ar.Valid() { - if !ai.a.HasNext() { - break - } - ai.ar, err = ai.a.NextRun() - if err != nil { - return Run{}, err - } - } - - if !ai.br.Valid() { - if !ai.b.HasNext() { - break - } - ai.br, err = ai.b.NextRun() - if err != nil { - return Run{}, err - } - } - - // && - newVal := ai.ar.Val && ai.br.Val - - // Check to see if we have an ongoing run and if we've changed - // value. - if run.Len > 0 && run.Val != newVal { - return run, nil - } - - newLen := min(ai.ar.Len, ai.br.Len) - - run.Val = newVal - run.Len += newLen - ai.ar.Len -= newLen - ai.br.Len -= newLen - } - - if run.Valid() { - return run, nil - } - - return Run{}, fmt.Errorf("end of runs") -} - -func And(a, b RunIterator) (RunIterator, error) { - if a.HasNext() && b.HasNext() { - return &andIter{a: a, b: b}, nil - } else { - // empty - return new(RunSliceIterator), nil - } -} - -type RunSliceIterator struct { - Runs []Run - i int -} - -func (ri *RunSliceIterator) HasNext() bool { - return ri.i < len(ri.Runs) -} - -func (ri *RunSliceIterator) NextRun() (Run, error) { - if ri.i >= len(ri.Runs) { - return Run{}, fmt.Errorf("end of runs") - } - - out := ri.Runs[ri.i] - ri.i++ - return out, nil -} - -type notIter struct { - it RunIterator -} - -func (ni *notIter) HasNext() bool { - return true -} - -func (ni *notIter) NextRun() (Run, error) { - if !ni.it.HasNext() { - // At this point, we'll keep returning "infinite" runs of true. 
- return Run{ - Val: true, - Len: math.MaxUint64, - }, nil - } - - nr, err := ni.it.NextRun() - if err != nil { - return Run{}, err - } - - nr.Val = !nr.Val - return nr, nil -} - -func Subtract(a, b RunIterator) (RunIterator, error) { - return And(a, ¬Iter{it: b}) -} - -type nextRun struct { - set bool - run Run - err error -} - -type peekIter struct { - it RunIterator - stash nextRun -} - -func (it *peekIter) HasNext() bool { - if it.stash.set { - return true - } - return it.it.HasNext() -} - -func (it *peekIter) NextRun() (Run, error) { - if it.stash.set { - run := it.stash.run - err := it.stash.err - it.stash = nextRun{} - return run, err - } - - return it.it.NextRun() -} - -func (it *peekIter) put(run Run, err error) { - it.stash = nextRun{ - set: true, - run: run, - err: err, - } -} - -// Returns iterator with all bits up to the last bit set: -// in: 11100000111010001110000 -// out: 1111111111111111111 -func Fill(iter RunIterator) (RunIterator, error) { - var at, length uint64 - for iter.HasNext() { - r, err := iter.NextRun() - if err != nil { - return nil, err - } - - at += r.Len - - if r.Val { - length = at - } - } - - var runs []Run - if length > 0 { - runs = append(runs, Run{ - Val: true, - Len: length, - }) - } - - return &RunSliceIterator{Runs: runs}, nil -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/zipf.go b/vendor/github.com/filecoin-project/go-bitfield/rle/zipf.go deleted file mode 100644 index 70807a6..0000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/zipf.go +++ /dev/null @@ -1,31 +0,0 @@ -package rlepluslazy - -import ( - "math" - "math/rand" -) - -func NewFromZipfDist(seed int64, size int) RunIterator { - zipf := rand.NewZipf(rand.New(rand.NewSource(seed)), 1.6978377, 1, math.MaxUint64/(1<<16)) - return &zipfIterator{ - i: size, - zipf: zipf, - } -} - -type zipfIterator struct { - i int - zipf *rand.Zipf -} - -func (zi *zipfIterator) HasNext() bool { - return zi.i != 0 -} - -func (zi *zipfIterator) NextRun() (Run, error) { - zi.i-- - return Run{ - Len: zi.zipf.Uint64() + 1, - Val: zi.i%2 == 0, - }, nil -} diff --git a/vendor/github.com/filecoin-project/go-commp-utils/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-commp-utils/LICENSE-APACHE deleted file mode 100644 index 14478a3..0000000 --- a/vendor/github.com/filecoin-project/go-commp-utils/LICENSE-APACHE +++ /dev/null @@ -1,5 +0,0 @@ -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
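The rlepluslazy package removed above is consumed through its iterator combinators (`RunsFromSlice`, `Or`, `And`, `Subtract`, `Count`, `SliceFromRuns`). A minimal sketch of how they compose, assuming the module import path `github.com/filecoin-project/go-bitfield/rle` as vendored here and nothing beyond the exported functions shown in the deleted files:

```golang
package main

import (
	"fmt"

	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
)

func main() {
	// Build run iterators from bit positions (BitsFromSlice sorts the input).
	a, err := rlepluslazy.RunsFromSlice([]uint64{0, 1, 2, 8, 9})
	if err != nil {
		panic(err)
	}
	b, err := rlepluslazy.RunsFromSlice([]uint64{2, 9})
	if err != nil {
		panic(err)
	}

	// Bits set in a but not in b: Subtract is And(a, not(b)) over runs.
	diff, err := rlepluslazy.Subtract(a, b)
	if err != nil {
		panic(err)
	}

	// Expand the resulting runs back into explicit bit positions.
	bits, err := rlepluslazy.SliceFromRuns(diff)
	if err != nil {
		panic(err)
	}
	fmt.Println(bits) // [0 1 8]
}
```

As the removed runs.go shows, `Subtract` is just `And` of the first iterator with the lazy negation of the second, so the whole pipeline stays run-length encoded and individual bits are only materialized by `SliceFromRuns` at the end.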
diff --git a/vendor/github.com/filecoin-project/go-commp-utils/LICENSE-MIT b/vendor/github.com/filecoin-project/go-commp-utils/LICENSE-MIT deleted file mode 100644 index 72dc60d..0000000 --- a/vendor/github.com/filecoin-project/go-commp-utils/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-commp-utils/nonffi/commd.go b/vendor/github.com/filecoin-project/go-commp-utils/nonffi/commd.go deleted file mode 100644 index 526c456..0000000 --- a/vendor/github.com/filecoin-project/go-commp-utils/nonffi/commd.go +++ /dev/null @@ -1,129 +0,0 @@ -package nonffi - -import ( - "errors" - "fmt" - "math/bits" - - "github.com/filecoin-project/go-commp-utils/zerocomm" - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" - sha256simd "github.com/minio/sha256-simd" -) - -type stackFrame struct { - size uint64 - commP []byte -} - -func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieceInfos []abi.PieceInfo) (cid.Cid, error) { - spi, found := abi.SealProofInfos[proofType] - if !found { - return cid.Undef, fmt.Errorf("unknown seal proof type %d", proofType) - } - if len(pieceInfos) == 0 { - return cid.Undef, errors.New("no pieces provided") - } - - maxSize := uint64(spi.SectorSize) - - todo := make([]stackFrame, len(pieceInfos)) - - // sancheck everything - for i, p := range pieceInfos { - if p.Size < 128 { - return cid.Undef, fmt.Errorf("invalid Size of PieceInfo %d: value %d is too small", i, p.Size) - } - if uint64(p.Size) > maxSize { - return cid.Undef, fmt.Errorf("invalid Size of PieceInfo %d: value %d is larger than sector size of SealProofType %d", i, p.Size, proofType) - } - if bits.OnesCount64(uint64(p.Size)) != 1 { - return cid.Undef, fmt.Errorf("invalid Size of PieceInfo %d: value %d is not a power of 2", i, p.Size) - } - - cp, err := commcid.CIDToPieceCommitmentV1(p.PieceCID) - if err != nil { - return cid.Undef, fmt.Errorf("invalid PieceCid for PieceInfo %d: %w", i, err) - } - todo[i] = stackFrame{size: uint64(p.Size), commP: cp} - } - - // reimplement https://github.com/filecoin-project/rust-fil-proofs/blob/380d6437c2/filecoin-proofs/src/pieces.rs#L85-L145 - stack := append( - make( - []stackFrame, - 0, - 32, - ), - todo[0], - ) - - for _, f := range todo[1:] { - - // pre-pad if needed to balance the left limb - for stack[len(stack)-1].size < f.size { - lastSize := stack[len(stack)-1].size - - stack = reduceStack( - 
append( - stack, - stackFrame{ - size: lastSize, - commP: zeroCommForSize(lastSize), - }, - ), - ) - } - - stack = reduceStack( - append( - stack, - f, - ), - ) - } - - for len(stack) > 1 { - lastSize := stack[len(stack)-1].size - stack = reduceStack( - append( - stack, - stackFrame{ - size: lastSize, - commP: zeroCommForSize(lastSize), - }, - ), - ) - } - - if stack[0].size > maxSize { - return cid.Undef, fmt.Errorf("provided pieces sum up to %d bytes, which is larger than sector size of SealProofType %d", stack[0].size, proofType) - } - - return commcid.PieceCommitmentV1ToCID(stack[0].commP) -} - -var s256 = sha256simd.New() - -func zeroCommForSize(s uint64) []byte { return zerocomm.PieceComms[bits.TrailingZeros64(s)-7][:] } - -func reduceStack(s []stackFrame) []stackFrame { - for len(s) > 1 && s[len(s)-2].size == s[len(s)-1].size { - - s256.Reset() - s256.Write(s[len(s)-2].commP) - s256.Write(s[len(s)-1].commP) - d := s256.Sum(make([]byte, 0, 32)) - d[31] &= 0b00111111 - - s[len(s)-2] = stackFrame{ - size: 2 * s[len(s)-2].size, - commP: d, - } - - s = s[:len(s)-1] - } - - return s -} diff --git a/vendor/github.com/filecoin-project/go-commp-utils/zerocomm/zerocomm.go b/vendor/github.com/filecoin-project/go-commp-utils/zerocomm/zerocomm.go deleted file mode 100644 index 9855a58..0000000 --- a/vendor/github.com/filecoin-project/go-commp-utils/zerocomm/zerocomm.go +++ /dev/null @@ -1,56 +0,0 @@ -package zerocomm - -import ( - "math/bits" - - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" -) - -const Levels = 37 -const Skip = 2 // can't generate for 32, 64b - -var PieceComms = [Levels - Skip][32]byte{ - {0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, 0x83, 0x33}, - {0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, 0x75, 0xf}, - {0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, 0x85, 0x26}, - {0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, 0x37, 0x2f}, - {0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, 0xf8, 0x33}, - {0x8, 0xc4, 0x7b, 0x38, 0xee, 0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, 0xf6, 0x24}, - {0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, 0xa3, 0x2f}, - {0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, 0x51, 0x8}, - {0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, 0x40, 0x11}, - {0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, 
0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, 0x4f, 0x2}, - {0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, 0xd, 0x3f}, - {0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, 0x2a, 0x7}, - {0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, 0x2d, 0x19}, - {0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, 0x16, 0x2b}, - {0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, 0x13, 0x26}, - {0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 0x4f, 0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, 0xe8, 0x11}, - {0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, 0x1c}, - {0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, 0xe1, 0x3a}, - {0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, 0x75, 0x8}, - {0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, 0xe7, 0x6}, - {0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, 0x1a, 0x2f}, - {0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, 0x1c, 0x37}, - {0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, 0xda, 0x6}, - {0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, 0x5b, 0x3d}, - {0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, 0xc6, 0x2c}, - {0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 0x84, 0x8e, 0x9f, 0x7b, 0x67, 0x51, 0x7}, - {0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, 0x9b, 0x1}, - {0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 
0xee, 0xdf, 0x83, 0xc2, 0x85, 0x3c}, - {0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, 0x3e}, - {0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, 0xce, 0x2}, - {0x25, 0x9d, 0x3d, 0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, 0x5, 0x1d}, - {0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, 0x92, 0x39}, - {0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, 0x81, 0x13}, - {0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, 0xe8, 0x19}, - {0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, 0xff, 0x32}, -} - -func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid { - level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32 - commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:]) - return commP -} diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/CONTRIBUTING.md b/vendor/github.com/filecoin-project/go-fil-commcid/CONTRIBUTING.md deleted file mode 100644 index 66c8b0d..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/CONTRIBUTING.md +++ /dev/null @@ -1,68 +0,0 @@ -# Contributing to this repo - -First, thank you for your interest in contributing to this project! Before you pick up your first issue and start -changing code, please: - -1. Review all documentation for the module you're interested in. -1. Look through the [issues for this repo](https://github.com/filecoin-project/go-fil-commcid/issues) for relevant discussions. -1. If you have questions about an issue, post a comment in the issue. -1. If you want to submit changes that aren't covered by an issue, file a new one with your proposal, outlining what problem you found/feature you want to implement, and how you intend to implement a solution. - -For best results, before submitting a PR, make sure: -1. It has met all acceptance criteria for the issue. -1. It addresses only the one issue and does not make other, irrelevant changes. -1. Your code conforms to our coding style guide. -1. You have adequate test coverage (this should be indicated by CI results anyway). -1. If you like, check out [current PRs](https://github.com/filecoin-project/go-fil-commcid/pulls) to see how others do it. - -Special Note: -If editing README.md, please conform to the [standard readme specification](https://github.com/RichardLitt/standard-readme/blob/master/spec.md). - -Before a PR can be merged to `master`, it must: -1. Pass continuous integration. -1. Be approved by at least two maintainers - -### Testing - -- All new code should be accompanied by unit tests. Prefer focused unit tests to integration tests for thorough validation of behaviour. Existing code is not necessarily a good model, here. 
-- Integration tests should test integration, not comprehensive functionality -- Tests should be placed in a separate package named `$PACKAGE_test`. For example, a test of the `chain` package should live in a package named `chain_test`. In limited situations, exceptions may be made for some "white box" tests placed in the same package as the code it tests. - -### Conventions and Style - -#### Imports -We use the following import ordering. -``` -import ( - [stdlib packages, alpha-sorted] - - [external packages] - - [go-filecoin packages] -) -``` - -Where a package name does not match its directory name, an explicit alias is expected (`goimports` will add this for you). - -Example: - -```go -import ( - "context" - "testing" - - cmds "github.com/ipfs/go-ipfs-cmds" - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-fil-commcid/filestore/file" -) -``` - -#### Comments -Comments are a communication to other developers (including your future self) to help them understand and maintain code. Good comments describe the _intent_ of the code, without repeating the procedures directly. - -- A `TODO:` comment describes a change that is desired but could not be immediately implemented. It must include a reference to a GitHub issue outlining whatever prevents the thing being done now (which could just be a matter of priority). -- A `NOTE:` comment indicates an aside, some background info, or ideas for future improvement, rather than the intent of the current code. It's often fine to document such ideas alongside the code rather than an issue (at the loss of a space for discussion). -- `FIXME`, `HACK`, `XXX` and similar tags indicating that some code is to be avoided in favour of `TODO`, `NOTE` or some straight prose. \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/COPYRIGHT b/vendor/github.com/filecoin-project/go-fil-commcid/COPYRIGHT deleted file mode 100644 index 771e6f7..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/COPYRIGHT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright 2019. Protocol Labs, Inc. - -This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-fil-commcid/LICENSE-APACHE deleted file mode 100644 index 5465143..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2019. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/LICENSE-MIT b/vendor/github.com/filecoin-project/go-fil-commcid/LICENSE-MIT deleted file mode 100644 index ea532a8..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2019. Protocol Labs, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/Makefile b/vendor/github.com/filecoin-project/go-fil-commcid/Makefile deleted file mode 100644 index f6a09e9..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -all: build -.PHONY: all - -SUBMODULES= - -commcid: - go build ./... -.PHONY: filestore -SUBMODULES+=commcid - -build: $(SUBMODULES) - -clean: \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/README.md b/vendor/github.com/filecoin-project/go-fil-commcid/README.md deleted file mode 100644 index 920c0d2..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/README.md +++ /dev/null @@ -1,109 +0,0 @@ -# go-fil-commcid -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![CircleCI](https://circleci.com/gh/filecoin-project/go-fil-commcid.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-fil-commcid) -[![codecov](https://codecov.io/gh/filecoin-project/go-fil-commcid/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-fil-commcid) - -Conversion Utilities Between CID and Piece/Data/Replica Commitments - -## Description - -This provides utility functions to convert from -commitment hashes used by Filecoin and Content IDs that meet [the CIDv1 standard](https://github.com/multiformats/cid) - -## Table of Contents -* [Background](https://github.com/filecoin-project/go-fil-commcid/tree/master#background) -* [Usage](https://github.com/filecoin-project/go-fil-commcid/tree/master#usage) -* [Contribute](https://github.com/filecoin-project/go-fil-commcid/tree/master#contribute) - -## Background - -See the [Filecoin PoRep Spec](https://filecoin-project.github.io/specs/#algorithms__porep) and the [Filecoin Paper](https://filecoin.io/filecoin.pdf) for how these commitment hashes (Piece Commitment, Data Commitment, Replica Commitment) are generated. 
- -This library adds codes neccesary to convert those commitment hashes to CIDs - -We define two combinations of `codec` and `multihash`: -- [fil-commitment-unsealed](https://github.com/multiformats/multicodec/blob/bf5c4806e/table.csv#L435) + [sha2-256-trunc254-padded](https://github.com/multiformats/multicodec/blob/bf5c4806e/table.csv#L110) for Piece Commitments and Data Commitments (shared due to identical underlying structure) -- [fil-commitment-sealed](https://github.com/multiformats/multicodec/blob/bf5c4806e/table.csv#L436) + [poseidon-bls12_381-a2-fc1](https://github.com/multiformats/multicodec/blob/bf5c4806e/table.csv#L433) for Replica Commitments - -## Usage - -**Requires go 1.13** - -Install the module in your package or app with `go get "github.com/filecoin-project/go-fil-commcid"` - -### Generating CIDs for CommP, CommD, CommR - -```golang -package mypackage - -import ( - commcid "github.com/filecoin-project/go-fil-commcid" -) - -var commP []byte -var commD []byte -var commR []byte - -// will error if the given commX is not the expected size (currently 32 bytes) -pieceCID, err := commcid.PieceCommitmentV1ToCID(commP) -unsealedSectorCID, err := commcid.DataCommitmentV1ToCID(commD) -sealedSectorCID, err := commcid.ReplicaCommitmentV1ToCID(commR) - -``` - -### Getting a raw CommP, CommR, CommD from a CID - -```golang -package mypackage - -import ( - commcid "github.com/filecoin-project/go-fil-commcid" -) - -var pieceCID cid.Cid -var unsealedSectorCID cid.Cid -var sealedSectorCID cid.Cid - -// will error if pieceCID does not have the correct codec & hash type -commP, err := commcid.CIDToPieceCommitmentV1(pieceCID) - -// will error if unsealedSectorCID does not have the correct codec & hash type -commD, err := commcid.CIDToDataCommitmentV1(unsealedSectorCID) - -// will error if sealedSectorCID does not have the correct codec & hash type -commR, err := commcid.CIDToReplicaCommitmentV1(sealedSectorCID) -``` - -### Going from arbitrary commitment to CID and back - -As Filecoin evolves, there will likely be new and better constructions for both sealed and unsealed data. Note `V1` in front of the above method names. - -To support future evolution, we provide more generalized methods for -going back and forth: - - -```golang -package mypackage - -import ( - commcid "github.com/filecoin-project/go-fil-commcid" -) - -var commIn []byte -var filCodec commcid.FilMultiCodec -var filHashAlg commcid.FilMultiHash - -commCID, err := commcid.CommmitmentToCID(filCodecIn, filHashAlgIn, commIn) - -filCodecOut, filHashOut, commOut, err := commcid.CIDToCommitment(commCID) -``` - -## Contributing -PRs are welcome! Please first read the design docs and look over the current code. PRs against -master require approval of at least two maintainers. For the rest, please see our -[CONTRIBUTING](https://github.com/filecoin-project/go-fil-commcid/CONTRIBUTING.md) guide. - -## License -This repository is dual-licensed under Apache 2.0 and MIT terms. - -Copyright 2019. Protocol Labs, Inc. 
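The codec/multihash pairings listed in the README above can be checked directly on a generated CID. A small sketch, assuming the vendored `go-fil-commcid`, `go-cid`, and `go-multihash` packages, with a placeholder 32-byte commitment standing in for a real commP:

```golang
package main

import (
	"fmt"

	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
)

func main() {
	// Placeholder 32-byte commitment; a real commP comes out of the piece hashing pipeline.
	commP := make([]byte, 32)

	pieceCID, err := commcid.PieceCommitmentV1ToCID(commP)
	if err != nil {
		panic(err)
	}

	// Piece/data commitments use fil-commitment-unsealed + sha2-256-trunc254-padded.
	p := pieceCID.Prefix()
	fmt.Println(p.Codec == cid.FilCommitmentUnsealed)             // true
	fmt.Println(p.MhType == multihash.SHA2_256_TRUNC254_PADDED)   // true

	// Round-trip back to the raw 32 commitment bytes.
	commOut, err := commcid.CIDToPieceCommitmentV1(pieceCID)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(commOut)) // 32
}
```

The same prefix check with `cid.FilCommitmentSealed` and `multihash.POSEIDON_BLS12_381_A1_FC1` applies to replica commitments produced by `ReplicaCommitmentV1ToCID`.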
diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/SECURITY.md b/vendor/github.com/filecoin-project/go-fil-commcid/SECURITY.md deleted file mode 100644 index 0e810df..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/SECURITY.md +++ /dev/null @@ -1,9 +0,0 @@ -# Security Policy - -## Reporting a Vulnerability - -For reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md) - -## Reporting a non security bug - -For non-critical bugs, please simply file a GitHub issue on this repo. diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/codecov.yml b/vendor/github.com/filecoin-project/go-fil-commcid/codecov.yml deleted file mode 100644 index 884c0ee..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/codecov.yml +++ /dev/null @@ -1,8 +0,0 @@ -coverage: - precision: 2 - round: up - range: "50...90" - status: - project: off - patch: off - diff --git a/vendor/github.com/filecoin-project/go-fil-commcid/commcid.go b/vendor/github.com/filecoin-project/go-fil-commcid/commcid.go deleted file mode 100644 index 2c62a8c..0000000 --- a/vendor/github.com/filecoin-project/go-fil-commcid/commcid.go +++ /dev/null @@ -1,149 +0,0 @@ -// Package commcid provides helpers to convert between Piece/Data/Replica -// Commitments and their CID representation -package commcid - -import ( - "errors" - "fmt" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - "github.com/multiformats/go-varint" - "golang.org/x/xerrors" -) - -// FilMultiCodec is a uint64-sized type representing a Filecoin-specific codec -type FilMultiCodec uint64 - -// FilMultiHash is a uint64-sized type representing a Filecoin-specific multihash -type FilMultiHash uint64 - -// FILCODEC_UNDEFINED is just a signifier for "no codec determined -const FILCODEC_UNDEFINED = FilMultiCodec(0) - -// FILMULTIHASH_UNDEFINED is a signifier for "no multihash etermined" -const FILMULTIHASH_UNDEFINED = FilMultiHash(0) - -var ( - // ErrIncorrectCodec means the codec for a CID is a block format that does not match - // a commitment hash - ErrIncorrectCodec = errors.New("unexpected commitment codec") - // ErrIncorrectHash means the hash function for this CID does not match the expected - // hash for this type of commitment - ErrIncorrectHash = errors.New("incorrect hashing function for data commitment") -) - -// CommitmentToCID converts a raw commitment hash to a CID -// by adding: -// - the given filecoin codec type -// - the given filecoin hash type -func CommitmentToCID(mc FilMultiCodec, mh FilMultiHash, commX []byte) (cid.Cid, error) { - if err := validateFilecoinCidSegments(mc, mh, commX); err != nil { - return cid.Undef, err - } - - mhBuf := make( - []byte, - (varint.UvarintSize(uint64(mh)) + varint.UvarintSize(uint64(len(commX))) + len(commX)), - ) - - pos := varint.PutUvarint(mhBuf, uint64(mh)) - pos += varint.PutUvarint(mhBuf[pos:], uint64(len(commX))) - copy(mhBuf[pos:], commX) - - return cid.NewCidV1(uint64(mc), multihash.Multihash(mhBuf)), nil -} - -// CIDToCommitment extracts the raw commitment bytes, the FilMultiCodec and -// FilMultiHash from a CID, after validating that the codec and hash type are -// consistent -func CIDToCommitment(c cid.Cid) (FilMultiCodec, FilMultiHash, []byte, error) { - decoded, err := multihash.Decode([]byte(c.Hash())) - if err != nil { - return FILCODEC_UNDEFINED, FILMULTIHASH_UNDEFINED, nil, xerrors.Errorf("Error decoding data 
commitment hash: %w", err) - } - - filCodec := FilMultiCodec(c.Type()) - filMh := FilMultiHash(decoded.Code) - if err := validateFilecoinCidSegments(filCodec, filMh, decoded.Digest); err != nil { - return FILCODEC_UNDEFINED, FILMULTIHASH_UNDEFINED, nil, err - } - - return filCodec, filMh, decoded.Digest, nil -} - -// DataCommitmentV1ToCID converts a raw data commitment to a CID -// by adding: -// - codec: cid.FilCommitmentUnsealed -// - hash type: multihash.SHA2_256_TRUNC254_PADDED -func DataCommitmentV1ToCID(commD []byte) (cid.Cid, error) { - return CommitmentToCID(cid.FilCommitmentUnsealed, multihash.SHA2_256_TRUNC254_PADDED, commD) -} - -// CIDToDataCommitmentV1 extracts the raw data commitment from a CID -// after checking for the correct codec and hash types. -func CIDToDataCommitmentV1(c cid.Cid) ([]byte, error) { - codec, _, commD, err := CIDToCommitment(c) - if err != nil { - return nil, err - } - if codec != cid.FilCommitmentUnsealed { - return nil, ErrIncorrectCodec - } - return commD, nil -} - -// ReplicaCommitmentV1ToCID converts a raw data commitment to a CID -// by adding: -// - codec: cid.FilCommitmentSealed -// - hash type: multihash.POSEIDON_BLS12_381_A1_FC1 -func ReplicaCommitmentV1ToCID(commR []byte) (cid.Cid, error) { - return CommitmentToCID(cid.FilCommitmentSealed, multihash.POSEIDON_BLS12_381_A1_FC1, commR) -} - -// CIDToReplicaCommitmentV1 extracts the raw replica commitment from a CID -// after checking for the correct codec and hash types. -func CIDToReplicaCommitmentV1(c cid.Cid) ([]byte, error) { - codec, _, commR, err := CIDToCommitment(c) - if err != nil { - return nil, err - } - if codec != cid.FilCommitmentSealed { - return nil, ErrIncorrectCodec - } - return commR, nil -} - -// ValidateFilecoinCidSegments returns an error if the provided CID parts -// conflict with each other. -func validateFilecoinCidSegments(mc FilMultiCodec, mh FilMultiHash, commX []byte) error { - - switch mc { - case cid.FilCommitmentUnsealed: - if mh != multihash.SHA2_256_TRUNC254_PADDED { - return ErrIncorrectHash - } - case cid.FilCommitmentSealed: - if mh != multihash.POSEIDON_BLS12_381_A1_FC1 { - return ErrIncorrectHash - } - default: // neither of the codecs above: we are not in Fil teritory - return ErrIncorrectCodec - } - - if len(commX) != 32 { - return fmt.Errorf("commitments must be 32 bytes long") - } - - return nil -} - -// PieceCommitmentV1ToCID converts a commP to a CID -// -- it is just a helper function that is equivalent to -// DataCommitmentV1ToCID. -var PieceCommitmentV1ToCID = DataCommitmentV1ToCID - -// CIDToPieceCommitmentV1 converts a CID to a commP -// -- it is just a helper function that is equivalent to -// CIDToDataCommitmentV1. 
-var CIDToPieceCommitmentV1 = CIDToDataCommitmentV1 diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/.gitignore b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/.gitignore deleted file mode 100644 index 398baf2..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ - -.idea diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/LICENSE b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/LICENSE deleted file mode 100644 index 83f48ce..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017 Whyrusleeping - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/Makefile b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/Makefile deleted file mode 100644 index d6816b3..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -all: build - -build: - go build ./... -.PHONY: build - -test: - go test ./... -.PHONY: test - -coverage: - go test -coverprofile=coverage.out ./... - go tool cover -html=coverage.out -.PHONY: coverage - -benchmark: - go test -bench=./... -.PHONY: benchmark - diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/README.md b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/README.md deleted file mode 100644 index 21fb28c..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/README.md +++ /dev/null @@ -1,26 +0,0 @@ -go-hamt-ipld -================== - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai/) -[![Travis CI](https://travis-ci.org/filecoin-project/go-hamt-ipld.svg?branch=master)](https://travis-ci.org/filecoin-project/go-hamt-ipld) - -**This package is a reference implementation of the IPLD HAMT used in the -Filecoin blockchain.** It includes some optional flexibility such that it may -be used for other purposes outside of Filecoin. - -HAMT is a ["hash array mapped trie"](https://en.wikipedia.org/wiki/Hash_array_mapped_trie). 
-This implementation extends the standard form by including buckets for the -key/value pairs at storage leaves and [CHAMP mutation semantics](https://michael.steindorfer.name/publications/oopsla15.pdf). -The CHAMP invariant and mutation rules provide us with the ability to maintain -canonical forms given any set of keys and their values, regardless of insertion -order and intermediate data insertion and deletion. Therefore, for any given -set of keys and their values, a HAMT using the same parameters and CHAMP -semantics, the root node should always produce the same content identifier -(CID). - -**See https://godoc.org/github.com/filecoin-project/go-hamt-ipld for more information and -API details.** - -## License - -MIT © Whyrusleeping diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/cbor_gen.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/cbor_gen.go deleted file mode 100644 index 61c9156..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/cbor_gen.go +++ /dev/null @@ -1,222 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package hamt - -import ( - "fmt" - "io" - "math/big" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -// NOTE: This is a generated file, but it has been modified to encode the -// bitfield big.Int as a byte array. The bitfield is only a big.Int because -// thats a convenient type for the operations we need to perform on it, but it -// is fundamentally an array of bytes (bits) - -var _ = xerrors.Errorf - -var lengthBufNode = []byte{130} - -func (t *Node) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufNode); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Bitfield (big.Int) (struct) - { - var b []byte - if t.Bitfield != nil { - b = t.Bitfield.Bytes() - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(b))); err != nil { - return err - } - if _, err := w.Write(b); err != nil { - return err - } - } - - // t.Pointers ([]*hamt.Pointer) (slice) - if len(t.Pointers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Pointers was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Pointers))); err != nil { - return err - } - for _, v := range t.Pointers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *Node) UnmarshalCBOR(r io.Reader) error { - *t = Node{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Bitfield (big.Int) (struct) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if maj != cbg.MajByteString { - return fmt.Errorf("big ints should be tagged cbor byte strings") - } - - if extra > 256 { - return fmt.Errorf("t.Bitfield: cbor bignum was too large") - } - - if extra > 0 { - buf := make([]byte, extra) - if _, err := io.ReadFull(br, buf); err != nil { - return err - } - t.Bitfield = big.NewInt(0).SetBytes(buf) - } else { - t.Bitfield = big.NewInt(0) - } - // t.Pointers ([]*hamt.Pointer) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > 
cbg.MaxLength { - return fmt.Errorf("t.Pointers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Pointers = make([]*Pointer, extra) - } - - for i := 0; i < int(extra); i++ { - - var v Pointer - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Pointers[i] = &v - } - - return nil -} - -var lengthBufKV = []byte{130} - -func (t *KV) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufKV); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Key ([]uint8) (slice) - if len(t.Key) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Key was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Key))); err != nil { - return err - } - - if _, err := w.Write(t.Key[:]); err != nil { - return err - } - - // t.Value (typegen.Deferred) (struct) - if err := t.Value.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *KV) UnmarshalCBOR(r io.Reader) error { - *t = KV{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Key ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Key: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Key = make([]byte, extra) - } - - if _, err := io.ReadFull(br, t.Key[:]); err != nil { - return err - } - // t.Value (typegen.Deferred) (struct) - - { - - t.Value = new(cbg.Deferred) - - if err := t.Value.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("failed to read deferred field: %w", err) - } - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/codecov.yml b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/codecov.yml deleted file mode 100644 index 5f88a9e..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -coverage: - range: "50...100" -comment: off diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/diff.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/diff.go deleted file mode 100644 index 3af3d81..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/diff.go +++ /dev/null @@ -1,254 +0,0 @@ -package hamt - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" -) - -// ChangeType denotes type of change in Change -type ChangeType int - -// These constants define the changes that can be applied to a DAG. -const ( - Add ChangeType = iota - Remove - Modify -) - -// Change represents a change to a DAG and contains a reference to the old and -// new CIDs. -type Change struct { - Type ChangeType - Key string - Before *cbg.Deferred - After *cbg.Deferred -} - -func (ch Change) String() string { - b, _ := json.Marshal(ch) - return string(b) -} - -// Diff returns a set of changes that transform node 'prev' into node 'cur'. opts are applied to both prev and cur. 
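A minimal sketch of driving the Diff function documented above, assuming two root CIDs of previously flushed HAMTs held in one shared cbor.IpldStore; the helper name and the single shared store are assumptions of this example, not part of the package.

package example

import (
	"context"
	"fmt"

	hamt "github.com/filecoin-project/go-hamt-ipld/v3"
	cid "github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
)

// printChanges lists every Add/Remove/Modify between two HAMT roots. Both
// HAMTs must have been written with the same bitWidth and hash function; pass
// the matching Options if they differ from the defaults.
func printChanges(ctx context.Context, store cbor.IpldStore, prevRoot, curRoot cid.Cid) error {
	changes, err := hamt.Diff(ctx, store, store, prevRoot, curRoot)
	if err != nil {
		return err
	}
	for _, ch := range changes {
		fmt.Println(ch) // Change.String() renders the change as JSON
	}
	return nil
}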
-func Diff(ctx context.Context, prevBs, curBs cbor.IpldStore, prev, cur cid.Cid, opts ...Option) ([]*Change, error) { - if prev.Equals(cur) { - return nil, nil - } - - prevHamt, err := LoadNode(ctx, prevBs, prev, opts...) - if err != nil { - return nil, err - } - - curHamt, err := LoadNode(ctx, curBs, cur, opts...) - if err != nil { - return nil, err - } - - if curHamt.bitWidth != prevHamt.bitWidth { - return nil, xerrors.Errorf("diffing HAMTs with differing bitWidths not supported (prev=%d, cur=%d)", prevHamt.bitWidth, curHamt.bitWidth) - } - return diffNode(ctx, prevHamt, curHamt, 0) -} - -func diffNode(ctx context.Context, pre, cur *Node, depth int) ([]*Change, error) { - // which Bitfield contains the most bits. We will start a loop from this index, calling Bitfield.Bit(idx) - // on an out of range index will return zero. - bp := cur.Bitfield.BitLen() - if pre.Bitfield.BitLen() > bp { - bp = pre.Bitfield.BitLen() - } - - // the changes between cur and prev - var changes []*Change - - // loop over each bit in the bitfields - for idx := bp; idx >= 0; idx-- { - preBit := pre.Bitfield.Bit(idx) - curBit := cur.Bitfield.Bit(idx) - - if preBit == 1 && curBit == 1 { - // index for pre and cur will be unique to each, calculate it here. - prePointer := pre.getPointer(byte(pre.indexForBitPos(idx))) - curPointer := cur.getPointer(byte(cur.indexForBitPos(idx))) - - // both pointers are shards, recurse down the tree. - if prePointer.isShard() && curPointer.isShard() { - preChild, err := prePointer.loadChild(ctx, pre.store, pre.bitWidth, pre.hash) - if err != nil { - return nil, err - } - curChild, err := curPointer.loadChild(ctx, cur.store, cur.bitWidth, cur.hash) - if err != nil { - return nil, err - } - - change, err := diffNode(ctx, preChild, curChild, depth+1) - if err != nil { - return nil, err - } - changes = append(changes, change...) - } - - // check if KV's from cur exists in any children of pre's child. - if prePointer.isShard() && !curPointer.isShard() { - childKV, err := prePointer.loadChildKVs(ctx, pre.store, pre.bitWidth, pre.hash) - if err != nil { - return nil, err - } - changes = append(changes, diffKVs(childKV, curPointer.KVs, idx)...) - - } - - // check if KV's from pre exists in any children of cur's child. - if !prePointer.isShard() && curPointer.isShard() { - childKV, err := curPointer.loadChildKVs(ctx, cur.store, cur.bitWidth, cur.hash) - if err != nil { - return nil, err - } - changes = append(changes, diffKVs(prePointer.KVs, childKV, idx)...) - } - - // both contain KVs, compare. - if !prePointer.isShard() && !curPointer.isShard() { - changes = append(changes, diffKVs(prePointer.KVs, curPointer.KVs, idx)...) - } - } else if preBit == 1 && curBit == 0 { - // there exists a value in previous not found in current - it was removed - pointer := pre.getPointer(byte(pre.indexForBitPos(idx))) - - if pointer.isShard() { - child, err := pointer.loadChild(ctx, pre.store, pre.bitWidth, pre.hash) - if err != nil { - return nil, err - } - rm, err := removeAll(ctx, child, idx) - if err != nil { - return nil, err - } - changes = append(changes, rm...) 
- } else { - for _, p := range pointer.KVs { - changes = append(changes, &Change{ - Type: Remove, - Key: string(p.Key), - Before: p.Value, - After: nil, - }) - } - } - } else if curBit == 1 && preBit == 0 { - // there exists a value in current not found in previous - it was added - pointer := cur.getPointer(byte(cur.indexForBitPos(idx))) - - if pointer.isShard() { - child, err := pointer.loadChild(ctx, pre.store, pre.bitWidth, pre.hash) - if err != nil { - return nil, err - } - add, err := addAll(ctx, child, idx) - if err != nil { - return nil, err - } - changes = append(changes, add...) - } else { - for _, p := range pointer.KVs { - changes = append(changes, &Change{ - Type: Add, - Key: string(p.Key), - Before: nil, - After: p.Value, - }) - } - } - } - } - - return changes, nil -} - -func diffKVs(pre, cur []*KV, idx int) []*Change { - preMap := make(map[string]*cbg.Deferred, len(pre)) - curMap := make(map[string]*cbg.Deferred, len(cur)) - var changes []*Change - - for _, kv := range pre { - preMap[string(kv.Key)] = kv.Value - } - for _, kv := range cur { - curMap[string(kv.Key)] = kv.Value - } - // find removed keys: keys in pre and not in cur - for key, value := range preMap { - if _, ok := curMap[key]; !ok { - changes = append(changes, &Change{ - Type: Remove, - Key: key, - Before: value, - After: nil, - }) - } - } - // find added keys: keys in cur and not in pre - // find modified values: keys in cur and pre with different values - for key, curVal := range curMap { - if preVal, ok := preMap[key]; !ok { - changes = append(changes, &Change{ - Type: Add, - Key: key, - Before: nil, - After: curVal, - }) - } else { - if !bytes.Equal(preVal.Raw, curVal.Raw) { - changes = append(changes, &Change{ - Type: Modify, - Key: key, - Before: preVal, - After: curVal, - }) - } - } - } - return changes -} - -func addAll(ctx context.Context, node *Node, idx int) ([]*Change, error) { - var changes []*Change - if err := node.ForEach(ctx, func(k string, val *cbg.Deferred) error { - changes = append(changes, &Change{ - Type: Add, - Key: k, - Before: nil, - After: val, - }) - - return nil - }); err != nil { - return nil, err - } - return changes, nil -} - -func removeAll(ctx context.Context, node *Node, idx int) ([]*Change, error) { - var changes []*Change - if err := node.ForEach(ctx, func(k string, val *cbg.Deferred) error { - changes = append(changes, &Change{ - Type: Remove, - Key: k, - Before: val, - After: nil, - }) - - return nil - }); err != nil { - return nil, err - } - return changes, nil -} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/doc.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/doc.go deleted file mode 100644 index b5dc12f..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/doc.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Package hamt provides a reference implementation of the IPLD HAMT used in the -Filecoin blockchain. It includes some optional flexibility such that it may be -used for other purposes outside of Filecoin. - -HAMT is a "hash array mapped trie" -https://en.wikipedia.org/wiki/Hash_array_mapped_trie. This implementation -extends the standard form by including buckets for the key/value pairs at -storage leaves and CHAMP mutation semantics -https://michael.steindorfer.name/publications/oopsla15.pdf. The CHAMP invariant -and mutation rules provide us with the ability to maintain canonical forms -given any set of keys and their values, regardless of insertion order and -intermediate data insertion and deletion. 
Therefore, for any given set of keys -and their values, a HAMT using the same parameters and CHAMP semantics, the -root node should always produce the same content identifier (CID). - -Algorithm Overview - -The HAMT algorithm hashes incoming keys and uses incrementing subsections of -that hash digest at each level of its tree structure to determine the placement -of either the entry or a link to a child node of the tree. A `bitWidth` -determines the number of bits of the hash to use for index calculation at each -level of the tree such that the root node takes the first `bitWidth` bits of -the hash to calculate an index and as we move lower in the tree, we move along -the hash by `depth x bitWidth` bits. In this way, a sufficiently randomizing -hash function will generate a hash that provides a new index at each level of -the data structure. An index comprising `bitWidth` bits will generate index -values of `[ 0, 2^bitWidth )`. So a `bitWidth` of 8 will generate indexes of 0 -to 255 inclusive. - -Each node in the tree can therefore hold up to `2^bitWidth` elements of data, -which we store in an array. In the this HAMT and the IPLD HashMap we store -entries in buckets. A `Set(key, value)` mutation where the index generated at -the root node for the hash of key denotes an array index that does not yet -contain an entry, we create a new bucket and insert the key / value pair entry. -In this way, a single node can theoretically hold up to -`2^bitWidth x bucketSize` entries, where `bucketSize` is the maximum number of -elements a bucket is allowed to contain ("collisions"). In practice, indexes do -not distribute with perfect randomness so this maximum is theoretical. Entries -stored in the node's buckets are stored in key-sorted order. - -Parameters - -This HAMT implementation: - -• Fixes the `bucketSize` to 3. - -• Defaults the `bitWidth` to 8, however within Filecoin it uses 5 - -• Defaults the hash algorithm to the 64-bit variant of Murmur3-x64 - -Further Reading - -The algorithm used here is identical to that of the IPLD HashMap algorithm -specified at -https://github.com/ipld/specs/blob/master/data-structures/hashmap.md. The -specific parameters used by Filecoin and the DAG-CBOR block layout differ from -the specification and are defined at -https://github.com/ipld/specs/blob/master/data-structures/hashmap.md#Appendix-Filecoin-hamt-variant. 
-*/ -package hamt diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/hamt.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/hamt.go deleted file mode 100644 index f406441..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/hamt.go +++ /dev/null @@ -1,857 +0,0 @@ -package hamt - -import ( - "bytes" - "context" - "fmt" - "math/big" - "sort" - - cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" -) - -//----------------------------------------------------------------------------- -// Boolean constants -type overwrite bool - -const ( - // use OVERWRITE for modifyValue operations that overwrite existing values - OVERWRITE = overwrite(true) - // use NOVERWRITE for modifyValue operations that cannot overwrite existing values - NOVERWRITE = overwrite(false) -) - -type modified bool - -const ( - // return MODIFIED when a key value mapping is overwritten - MODIFIED = modified(true) - // return UNMODIFIED when a no key value mappings are overwritten - UNMODIFIED = modified(false) -) - -//----------------------------------------------------------------------------- -// Errors - -// ErrMaxDepth is returned when the HAMT spans further than the hash function -// is capable of representing. This can occur when sufficient hash collisions -// (e.g. from a weak hash function and attacker-provided keys) extend leaf -// nodes beyond the number of bits that a hash can represent. Or this can occur -// on extremely large (likely impractical) HAMTs that are unable to be -// represented with the hash function used. Hash functions with larger byte -// output increase the maximum theoretical depth of a HAMT. -var ErrMaxDepth = fmt.Errorf("attempted to traverse HAMT beyond max-depth") - -// ErrMalformedHamt is returned whenever a block intended as a HAMT node does -// not conform to the expected form that a block may take. This can occur -// during block-load where initial validation takes place or during traversal -// where certain conditions are expected to be met. -var ErrMalformedHamt = fmt.Errorf("HAMT node was malformed") - -//----------------------------------------------------------------------------- -// Serialized data structures - -// HashFunc is a hashing function for values. -type HashFunc func([]byte) []byte - -// Node is a single point in the HAMT, encoded as an IPLD tuple in DAG-CBOR of -// shape: -// [bytes, [Pointer...]] -// where 'bytes' is the big.Int#Bytes() and the Pointers array is between 1 and -// `2^bitWidth`. -// -// The Bitfield provides us with a mechanism to store a compacted array of -// Pointers. Each bit in the Bitfield represents an element in a sparse array -// where `1` indicates the element is present in the Pointers array and `0` -// indicates it is omitted. To look-up a specific index in the Pointers array -// you must first make a count of the number of `1`s (popcount) up to the -// element you are looking for. -// e.g. a Bitfield of `10010110000` shows that we have a 4 element Pointers -// array. Indexes `[1]` and `[2]` are not present, but index `[3]` is at -// the second position of our Pointers array. 
-// -// The IPLD Schema representation of this data structure is as follows: -// -// type Node struct { -// bitfield Bytes -// pointers [Pointer] -// } representation tuple -type Node struct { - Bitfield *big.Int - Pointers []*Pointer - - bitWidth int - hash HashFunc - - // for fetching and storing children - store cbor.IpldStore -} - -// Pointer is an element in a HAMT node's Pointers array, encoded as an IPLD -// tuple in DAG-CBOR of shape: -// CID or [KV...] -// i.e. it is represented as a "kinded union" where a Link is a pointer to a -// child node, while an array is a bucket of elements local to this node. A -// Pointer must represent exactly one of of these two states and cannot be both -// (or neither). -// -// There are between 1 and 2^bitWidth of these Pointers in any HAMT node. -// -// A Pointer contains either a KV bucket of up to `bucketSize` (3) values or a -// link (CID) to a child node. When a KV bucket overflows beyond `bucketSize`, -// the bucket is replaced with a link to a newly created HAMT node which will -// contain the `bucketSize+1` elements in its own Pointers array. -// -// The IPLD Schema representation of this data structure is as follows: -// -// type Pointer union { -// &Node link -// Bucket list -// } representation kinded -// -// type Bucket [KV] -type Pointer struct { - KVs []*KV - Link cid.Cid - - // cache is a pointer to an in-memory Node, which may or may not be - // present, and corresponds to the Link field, which also may or may not - // be present. - // - // If present, the cached Node should be semantically substitutable with - // the Link field. It makes no sense for a cache Node to be present if KVs - // is set. Link might not be set, if cache is present and is describing - // data that has never yet been serialized and stored. - // - // `loadChild` will short circut to return this node if the pointer isn't - // nil; - // `loadChild` will also set this pointer when loading a node that wasn't - // yet present cached. - // `Flush` on a `Node` will iterate through each `Pointer` and `Put` its - // cache node if: - // 1. The Pointer's cache is not nil - // 2. The Pointer's dirty flag is true - // (and also recurse to `Flush` on that `Node`) -- in other words, - // `Flush` writes out the cached data - // `Flush` will assign `Link` in the process of `Put`'ing the 'cache' data. - // `Copy` will copy any cached nodes, Link fields and dirty flags. - // - // `Link` becomes defined on`Flush` - cache *Node - // dirty flag to indicate that the cached node needs to be flushed - dirty bool -} - -// KV represents leaf storage within a HAMT node. A Pointer may hold up to -// `bucketSize` KV elements, where each KV contains a key and value pair -// stored by the user. -// -// Keys are represented as bytes. -// -// The IPLD Schema representation of this data structure is as follows: -// -// type KV struct { -// key Bytes -// value Any -// } representation tuple -type KV struct { - Key []byte - Value *cbg.Deferred -} - -//----------------------------------------------------------------------------- -// Instance and helpers functions - -// NewNode creates a new IPLD HAMT Node with the given IPLD store and any -// additional options (bitWidth and hash function). -// -// This function creates a new HAMT that you can use directly and is also -// used internally to create child nodes. 
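A minimal usage sketch of NewNode together with SetRaw and FindRaw, assuming some cbor.IpldStore implementation is available; the key and the pre-encoded value bytes are arbitrary illustration values.

package example

import (
	"context"
	"fmt"

	hamt "github.com/filecoin-project/go-hamt-ipld/v3"
	cbor "github.com/ipfs/go-ipld-cbor"
)

func setAndFind(ctx context.Context, store cbor.IpldStore) error {
	n, err := hamt.NewNode(store) // default bitWidth=8, murmur3-x64 hashing
	if err != nil {
		return err
	}

	// SetRaw stores pre-encoded bytes; 0x18 0x2a is the DAG-CBOR encoding of 42.
	if err := n.SetRaw(ctx, "some-key", []byte{0x18, 0x2a}); err != nil {
		return err
	}

	found, raw, err := n.FindRaw(ctx, "some-key")
	if err != nil {
		return err
	}
	fmt.Println(found, raw) // true [24 42]
	return nil
}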
-func NewNode(cs cbor.IpldStore, options ...Option) (*Node, error) { - cfg := defaultConfig() - for _, option := range options { - if err := option(cfg); err != nil { - return nil, err - } - } - - return newNode(cs, cfg.hashFn, cfg.bitWidth), nil -} - -// Find navigates through the HAMT structure to where key `k` should exist. If -// the key is not found, returns false. If the key is found, returns true, and -// if the `out` parameter has an UnmarshalCBOR(Reader) method, the -// value is decoded into it. The `out` parameter may be nil to test for existence -// without decoding. -// -// Depending on the size of the HAMT, this method may load a large number of -// child nodes via the HAMT's IpldStore. -func (n *Node) Find(ctx context.Context, k string, out cbg.CBORUnmarshaler) (bool, error) { - var found bool - err := n.getValue(ctx, &hashBits{b: n.hash([]byte(k))}, k, func(kv *KV) error { - found = true - // Note that an interface pointer-to-nil is not == nil and, if received here, will panic. - if out == nil { - return nil - } - return out.UnmarshalCBOR(bytes.NewReader(kv.Value.Raw)) - }) - return found, err -} - -// FindRaw performs the same function as Find, but returns the raw bytes found -// at the key's location (which may or may not be DAG-CBOR, see also SetRaw). -func (n *Node) FindRaw(ctx context.Context, k string) (bool, []byte, error) { - var found bool - var value []byte - err := n.getValue(ctx, &hashBits{b: n.hash([]byte(k))}, k, func(kv *KV) error { - found = true - value = kv.Value.Raw - return nil - }) - return found, value, err -} - -// Delete removes an entry from the HAMT structure. -// -// Returns true if the key was found and deleted, false if the key was absent. -// -// This operation will result in the modification of _at least_ one IPLD block -// via the IpldStore. Depending on the contents of the leaf node, this -// operation may result in a node collapse to shrink the HAMT into its -// canonical form for the remaining data. For an insufficiently random -// collection of keys at the relevant leaf nodes such a collapse may cascade to -// further nodes. -func (n *Node) Delete(ctx context.Context, k string) (bool, error) { - kb := []byte(k) - modified, err := n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, nil, OVERWRITE) - return modified == MODIFIED, err -} - -// Constructs a new node value. -func newNode(cs cbor.IpldStore, hashFn HashFunc, bitWidth int) *Node { - nd := &Node{ - Bitfield: big.NewInt(0), - Pointers: make([]*Pointer, 0), - bitWidth: bitWidth, - hash: hashFn, - store: cs, - } - return nd -} - -// handle the two Find operations in a recursive manner, where each node in the -// HAMT we traverse we call this function again with the same parameters. -// Invokes the callback if and only if the key is found. -// Note that `hv` contains state and `hv.Next()` is not idempotent. Each call -// increments a counter for the number of bits consumed. -func (n *Node) getValue(ctx context.Context, hv *hashBits, k string, cb func(*KV) error) error { - // hv.Next chomps off `bitWidth` bits from the hash digest. As we proceed - // down the tree, each node takes `bitWidth` more bits from the digest. If - // we attempt to take more bits than the digest contains, we hit max-depth - // and can't proceed. - idx, err := hv.Next(n.bitWidth) - if err != nil { - return ErrMaxDepth - } - - // if the element expected at this node isn't here then we can be sure it - // doesn't exist in the HAMT. 
- if n.Bitfield.Bit(idx) == 0 { - return nil - } - - // otherwise, the value is either local or in a child - - // perform a popcount of bits up to the `idx` to find `cindex` - cindex := byte(n.indexForBitPos(idx)) - - c := n.getPointer(cindex) - if c.isShard() { - // if isShard, we have a pointer to a child that we need to load and - // delegate our find operation to - chnd, err := c.loadChild(ctx, n.store, n.bitWidth, n.hash) - if err != nil { - return err - } - - return chnd.getValue(ctx, hv, k, cb) - } - - // if not isShard, then the key/value pair is local and we need to retrieve - // it from the bucket. The bucket is sorted but only between 1 and - // `bucketSize` in length, so no need for fanciness. - for _, kv := range c.KVs { - if string(kv.Key) == k { - return cb(kv) - } - } - - return nil -} - -// load a HAMT node from the IpldStore and pass on the (assumed) parameters -// that are not stored with the node. -func (p *Pointer) loadChild(ctx context.Context, ns cbor.IpldStore, bitWidth int, hash HashFunc) (*Node, error) { - if p.cache != nil { - return p.cache, nil - } - - out, err := loadNode(ctx, ns, p.Link, false, bitWidth, hash) - if err != nil { - return nil, err - } - - p.cache = out - return out, nil -} - -// load a HAMT node from the IpldStore passing on the (assumed) parameters -// that are not stored with the node and return all KVs of the child and its children. -func (p *Pointer) loadChildKVs(ctx context.Context, ns cbor.IpldStore, bitWidth int, hash HashFunc) ([]*KV, error) { - child, err := p.loadChild(ctx, ns, bitWidth, hash) - if err != nil { - return nil, err - } - var out []*KV - if err := child.ForEach(ctx, func(k string, val *cbg.Deferred) error { - out = append(out, &KV{ - Key: []byte(k), - Value: val, - }) - return nil - }); err != nil { - return nil, err - } - return out, nil -} - -// LoadNode loads a HAMT Node from the IpldStore and configures it according -// to any specified Option parameters. Where the parameters of this HAMT vary -// from the defaults (hash function and bitWidth), those variations _must_ be -// supplied here via Options otherwise the HAMT will not be readable. -// -// Users should consider how their HAMT parameters are stored or specified -// along with their HAMT where the data is expected to have a long shelf-life -// as future users will need to know the parameters of a HAMT being loaded in -// order to decode it. Users should also NOT rely on the default parameters -// of this library to remain the defaults long-term and have strategies in -// place to manage variations. -func LoadNode(ctx context.Context, cs cbor.IpldStore, c cid.Cid, options ...Option) (*Node, error) { - cfg := defaultConfig() - for _, option := range options { - if err := option(cfg); err != nil { - return nil, err - } - } - return loadNode(ctx, cs, c, true, cfg.bitWidth, cfg.hashFn) -} - -// internal version of loadNode that is aware of whether this is a root node or -// not for the purpose of additional validation on non-root nodes. -func loadNode( - ctx context.Context, - cs cbor.IpldStore, - c cid.Cid, - isRoot bool, - bitWidth int, - hashFunction HashFunc, -) (*Node, error) { - var out Node - if err := cs.Get(ctx, c, &out); err != nil { - return nil, err - } - - out.store = cs - out.bitWidth = bitWidth - out.hash = hashFunction - - // Validation - - // too many elements in the data array for the configured bitWidth? 
- if len(out.Pointers) > 1< bucketSize { - return nil, ErrMalformedHamt - } - for i := 1; i < len(ch.KVs); i++ { - if bytes.Compare(ch.KVs[i-1].Key, ch.KVs[i].Key) >= 0 { - return nil, ErrMalformedHamt - } - } - } - } - - if !isRoot { - // the only valid empty node is a root node - if len(out.Pointers) == 0 { - return nil, ErrMalformedHamt - } - // a non-root node that contains <=bucketSize direct elements should not - // exist under compaction rules - if out.directChildCount() == 0 && out.directKVCount() <= bucketSize { - return nil, ErrMalformedHamt - } - } - - return &out, nil -} - -// checkSize computes the total serialized size of the entire HAMT. -// It both puts and loads blocks as necesary to do this -// (using the Put operation and a paired Get to discover the serial size, -// and the load to move recursively as necessary). -// -// This is an expensive operation and should only be used in testing and analysis. -// -// Note that checkSize *does* actually *use the blockstore*: therefore it -// will affect get and put counts (and makes no attempt to avoid duplicate puts!); -// be aware of this if you are measuring those event counts. -func (n *Node) checkSize(ctx context.Context) (uint64, error) { - c, err := n.store.Put(ctx, n) - if err != nil { - return 0, err - } - - var def cbg.Deferred - if err := n.store.Get(ctx, c, &def); err != nil { - return 0, nil - } - - totsize := uint64(len(def.Raw)) - for _, ch := range n.Pointers { - if ch.isShard() { - chnd, err := ch.loadChild(ctx, n.store, n.bitWidth, n.hash) - if err != nil { - return 0, err - } - chsize, err := chnd.checkSize(ctx) - if err != nil { - return 0, err - } - totsize += chsize - } - } - - return totsize, nil -} - -// Flush has two effectis, it (partially!) persists data and resets dirty flag -// -// Flush operates recursively, telling each "cache" child node to flush; -// Put'ing that "cache" node to the store; -// updating this node's Link to the CID resulting from the store Put; -// clearing the dirty flag of that pointer to flase -// and then returning. -// Flush doesn't operate unless there's a "cache" node. -// -// "cache" nodes were previously storing either updated values, -// or, simply storing previously loaded data; these are disambiguated by the -// dirty flag which is true when the cache node's data has not been persisted -// -// Notice that Flush _does not_ Put _this node_. -// To fully persist changes, the caller still needs to Put this node to the -// store themselves, and store the new resulting Link wherever they expect the -// updated HAMT to be seen. -func (n *Node) Flush(ctx context.Context) error { - for _, p := range n.Pointers { - if p.cache != nil && p.dirty { - if err := p.cache.Flush(ctx); err != nil { - return err - } - - c, err := n.store.Put(ctx, p.cache) - if err != nil { - return err - } - - p.dirty = false - p.Link = c - } - } - return nil -} - -// Set key k to value v, where v is has a MarshalCBOR(bytes.Buffer) method to -// encode it. -// -// To fully commit the change, it is necessary to Flush the root Node, -// and then additionally Put the root node to the store itself, -// and save the resulting CID wherever you expect the HAMT root to persist. 
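A sketch of the full commit sequence described above: mutate, Flush the root, then Put the root itself and record the returned CID. The helper name and its error-handling policy are assumptions of this example.

package example

import (
	"context"

	hamt "github.com/filecoin-project/go-hamt-ipld/v3"
	cid "github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	cbg "github.com/whyrusleeping/cbor-gen"
)

func setAndPersist(ctx context.Context, store cbor.IpldStore, n *hamt.Node, k string, v cbg.CBORMarshaler) (cid.Cid, error) {
	if err := n.Set(ctx, k, v); err != nil {
		return cid.Undef, err
	}
	// Write out any dirty cached child nodes first...
	if err := n.Flush(ctx); err != nil {
		return cid.Undef, err
	}
	// ...then Put the root node; the returned CID is the new HAMT root to
	// record wherever the HAMT is referenced from.
	return store.Put(ctx, n)
}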
-func (n *Node) Set(ctx context.Context, k string, v cbg.CBORMarshaler) error { - var d cbg.Deferred - if v == nil { - d.Raw = cbg.CborNull - } else { - valueBuf := new(bytes.Buffer) - if err := v.MarshalCBOR(valueBuf); err != nil { - return err - } - d.Raw = valueBuf.Bytes() - } - - keyBytes := []byte(k) - _, err := n.modifyValue(ctx, &hashBits{b: n.hash(keyBytes)}, keyBytes, &d, OVERWRITE) - return err -} - -// SetIfAbsent sets key k to value v only if k is not already set to some value. -// Returns true if the value mapped to k is changed by this operation -// false otherwise. -func (n *Node) SetIfAbsent(ctx context.Context, k string, v cbg.CBORMarshaler) (bool, error) { - var d cbg.Deferred - if v == nil { - d.Raw = cbg.CborNull - } else { - valueBuf := new(bytes.Buffer) - if err := v.MarshalCBOR(valueBuf); err != nil { - return false, err - } - d.Raw = valueBuf.Bytes() - } - - keyBytes := []byte(k) - modified, err := n.modifyValue(ctx, &hashBits{b: n.hash(keyBytes)}, keyBytes, &d, NOVERWRITE) - return bool(modified), err -} - -// SetRaw is similar to Set but sets key k in the HAMT to raw bytes without -// performing a DAG-CBOR marshal. The bytes may or may not be encoded DAG-CBOR -// (see also FindRaw for fetching raw form). -func (n *Node) SetRaw(ctx context.Context, k string, raw []byte) error { - d := &cbg.Deferred{Raw: raw} - kb := []byte(k) - _, err := n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, d, OVERWRITE) - return err -} - -// the number of links to child nodes this node contains -func (n *Node) directChildCount() int { - count := 0 - for _, p := range n.Pointers { - if p.isShard() { - count++ - } - } - return count -} - -// the number of KV entries this node contains -func (n *Node) directKVCount() int { - count := 0 - for _, p := range n.Pointers { - if !p.isShard() { - count = count + len(p.KVs) - } - } - return count -} - -// This happens after deletes to ensure that we retain canonical form for the -// given set of data this HAMT contains. This is a key part of the CHAMP -// algorithm. Any node that could be represented as a bucket in a parent node -// should be collapsed as such. This collapsing process could continue back up -// the tree as far as necessary to represent the data in the minimal HAMT form. -// This operation is done from a parent perspective, so we clean the child -// below us first and then our parent cleans us. -func (n *Node) cleanChild(chnd *Node, cindex byte) error { - if chnd.directChildCount() != 0 { - // child has its own children, nothing to collapse - return nil - } - - if chnd.directKVCount() > bucketSize { - // child contains more local elements than could be collapsed - return nil - } - - if len(chnd.Pointers) == 1 { - // The case where the child node has a single bucket, which we know can - // only contain `bucketSize` elements (maximum), so we need to pull that - // bucket up into this node. - // This case should only happen when it bubbles up from the case below - // where a lower child has its elements compacted into a single bucket. We - // shouldn't be able to reach this block unless a delete has been - // performed on a lower block and we are performing a post-delete clean on - // a parent block. - return n.setPointer(cindex, chnd.Pointers[0]) - } - - // The case where the child node contains enough elements to fit in a - // single bucket and therefore can't justify its existence as a node on its - // own. So we collapse all entries into a single bucket and replace the - // link to the child with that bucket. 
- // This may cause cascading collapses if this is the only bucket in the - // current node, that case will be handled by our parent node by the l==1 - // case above. - var chvals []*KV - for _, p := range chnd.Pointers { - chvals = append(chvals, p.KVs...) - } - kvLess := func(i, j int) bool { - ki := chvals[i].Key - kj := chvals[j].Key - return bytes.Compare(ki, kj) < 0 - } - sort.Slice(chvals, kvLess) - - return n.setPointer(cindex, &Pointer{KVs: chvals}) -} - -// Add a new value, update an existing value, or delete a value from the HAMT, -// potentially recursively calling child nodes to find the exact location of -// the entry in question and potentially collapsing nodes into buckets in -// parent nodes where a deletion violates the canonical form rules (see -// cleanNode()). Recursive calls use the same arguments on child nodes but -// note that `hv.Next()` is not idempotent. Each call will increment the number -// of bits chomped off the hash digest for this key. -func (n *Node) modifyValue(ctx context.Context, hv *hashBits, k []byte, v *cbg.Deferred, replace overwrite) (modified, error) { - idx, err := hv.Next(n.bitWidth) - if err != nil { - return UNMODIFIED, ErrMaxDepth - } - - // if the element expected at this node isn't here then we can be sure it - // doesn't exist in the HAMT already and can insert it at the appropriate - // position. - if n.Bitfield.Bit(idx) != 1 { - if v == nil { // Delete absent key - return UNMODIFIED, nil - } - return MODIFIED, n.insertKV(idx, k, v) - } - - // otherwise, the value is either local or in a child - - // perform a popcount of bits up to the `idx` to find `cindex` - cindex := byte(n.indexForBitPos(idx)) - - child := n.getPointer(cindex) - if child.isShard() { - // if isShard, we have a pointer to a child that we need to load and - // delegate our modify operation to. - // Note that this loadChild operation will cause the loaded node to be - // "cached" and this pointer to be marked as dirty; - // it is an eventual Flush passing back over this "cache" node which - // causes the updates made to the in-memory "cache" node to eventually - // be persisted. - chnd, err := child.loadChild(ctx, n.store, n.bitWidth, n.hash) - if err != nil { - return UNMODIFIED, err - } - - modified, err := chnd.modifyValue(ctx, hv, k, v, replace) - if err != nil { - return UNMODIFIED, err - } - - if modified { - // if we are modifying set the child.dirty - // if we are not modifying leave it be, another operation might had set it previously - child.dirty = true - } - - // CHAMP optimization, ensure the HAMT retains its canonical form for the - // current data it contains. This may involve collapsing child nodes if - // they no longer contain enough elements to justify their stand-alone - // existence. - if v == nil { - if err := n.cleanChild(chnd, cindex); err != nil { - return UNMODIFIED, err - } - } - - return modified, nil - } - - // if not isShard, then either the key/value pair is local here and can be - // modified (or deleted) here or needs to be added as a new child node if - // there is an overflow. 
- - if v == nil { - // delete operation, find the child and remove it, compacting the bucket in - // the process - for i, p := range child.KVs { - if bytes.Equal(p.Key, k) { - if len(child.KVs) == 1 { - // last element in the bucket, remove it and update the bitfield - return MODIFIED, n.rmPointer(cindex, idx) - } - - copy(child.KVs[i:], child.KVs[i+1:]) - child.KVs = child.KVs[:len(child.KVs)-1] - return MODIFIED, nil - } - } - return UNMODIFIED, nil // Delete absent key - } - - // modify existing, check if key already exists - for _, p := range child.KVs { - if bytes.Equal(p.Key, k) { - if bool(replace) && !bytes.Equal(p.Value.Raw, v.Raw) { - p.Value = v - return MODIFIED, nil - } - return UNMODIFIED, nil - } - } - - if len(child.KVs) >= bucketSize { - // bucket is full, create a child node (shard) with all existing bucket - // elements plus the new one and set it in the place of the bucket - sub := newNode(n.store, n.hash, n.bitWidth) - hvcopy := &hashBits{b: hv.b, consumed: hv.consumed} - if _, err := sub.modifyValue(ctx, hvcopy, k, v, replace); err != nil { - return UNMODIFIED, err - } - - for _, p := range child.KVs { - chhv := &hashBits{b: n.hash(p.Key), consumed: hv.consumed} - if _, err := sub.modifyValue(ctx, chhv, p.Key, p.Value, replace); err != nil { - return UNMODIFIED, err - } - } - - return MODIFIED, n.setPointer(cindex, &Pointer{cache: sub, dirty: true}) - } - - // otherwise insert the new element into the array in order, the ordering is - // important to retain canonical form - np := &KV{Key: k, Value: v} - for i := 0; i < len(child.KVs); i++ { - if bytes.Compare(k, child.KVs[i].Key) < 0 { - child.KVs = append(child.KVs[:i], append([]*KV{np}, child.KVs[i:]...)...) - return MODIFIED, nil - } - } - child.KVs = append(child.KVs, np) - return MODIFIED, nil -} - -// Insert a new key/value pair into the current node at the specified index. -// This will involve modifying the bitfield for that index and inserting a new -// bucket containing the single key/value pair at that position. -func (n *Node) insertKV(idx int, k []byte, v *cbg.Deferred) error { - i := n.indexForBitPos(idx) - n.Bitfield.SetBit(n.Bitfield, idx, 1) - - p := &Pointer{KVs: []*KV{{Key: k, Value: v}}} - - n.Pointers = append(n.Pointers[:i], append([]*Pointer{p}, n.Pointers[i:]...)...) - return nil -} - -// Set a Pointer at a specific location, this doesn't modify the elements array -// but assumes that what's there can be updated. This seems to mostly be useful -// for tail calls. -func (n *Node) setPointer(i byte, p *Pointer) error { - n.Pointers[i] = p - return nil -} - -// Remove a child at a specified index, splicing the Pointers array to remove -// it and updating the bitfield to specify that an element no longer exists at -// that position. -func (n *Node) rmPointer(i byte, idx int) error { - copy(n.Pointers[i:], n.Pointers[i+1:]) - n.Pointers = n.Pointers[:len(n.Pointers)-1] - n.Bitfield.SetBit(n.Bitfield, idx, 0) - - return nil -} - -// Load a Pointer from the specified index of the Pointers array. The element -// should exist in a properly formed HAMT. -func (n *Node) getPointer(i byte) *Pointer { - if int(i) >= len(n.Pointers) { - // TODO(rvagg): I think this should be an error, there's an assumption in - // calling code that it's not null and a proper hash chomp shouldn't result - // in anything out of bounds - return nil - } - - return n.Pointers[i] -} - -// Copy a HAMT node and all of its contents. May be useful for mutation -// operations where the original needs to be preserved in memory. 
-// -// This operation will also recursively clone any child nodes that are attached -// as cached nodes. -func (n *Node) Copy() *Node { - // TODO(rvagg): clarify what situations this method is actually useful for. - nn := newNode(n.store, n.hash, n.bitWidth) - nn.Bitfield.Set(n.Bitfield) - nn.Pointers = make([]*Pointer, len(n.Pointers)) - - for i, p := range n.Pointers { - pp := &Pointer{} - if p.cache != nil { - pp.cache = p.cache.Copy() - pp.dirty = p.dirty - } - pp.Link = p.Link - if p.KVs != nil { - pp.KVs = make([]*KV, len(p.KVs)) - for j, kv := range p.KVs { - pp.KVs[j] = &KV{Key: kv.Key, Value: kv.Value} - } - } - nn.Pointers[i] = pp - } - - return nn -} - -// Pointers elements can either contain a bucket of local elements or be a -// link to a child node. In the case of a link, isShard() returns true. -func (p *Pointer) isShard() bool { - return p.cache != nil || p.Link.Defined() -} - -// ForEach recursively calls function f on each k / val pair found in the HAMT. -// This performs a full traversal of the graph and for large HAMTs can cause -// a large number of loads from the underlying store. -// The values are returned as raw bytes, not decoded. -func (n *Node) ForEach(ctx context.Context, f func(k string, val *cbg.Deferred) error) error { - for _, p := range n.Pointers { - if p.isShard() { - chnd, err := p.loadChild(ctx, n.store, n.bitWidth, n.hash) - if err != nil { - return err - } - - if err := chnd.ForEach(ctx, f); err != nil { - return err - } - } else { - for _, kv := range p.KVs { - if err := f(string(kv.Key), kv.Value); err != nil { - return err - } - } - } - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/hash.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/hash.go deleted file mode 100644 index 3f055a9..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/hash.go +++ /dev/null @@ -1,72 +0,0 @@ -package hamt - -import ( - "fmt" - - "github.com/spaolacci/murmur3" -) - -// hashBits is a helper that allows the reading of the 'next n bits' of a -// digest as an integer. State is retained and calls to `Next` will -// increment the number of consumed bits. -type hashBits struct { - b []byte - consumed int -} - -func mkmask(n int) byte { - return (1 << uint(n)) - 1 -} - -// Next returns the next 'i' bits of the hashBits value as an integer, or an -// error if there aren't enough bits. -// Not enough bits means that the tree is not large enough to contain the data. -// Where the hash is providing a sufficient enough random distribution this -// means that it is "full", Where the distribution is not sufficiently random -// enough, this means there have been too many collisions. Where a user can -// control keys (that are hashed) and the hash function has some -// predictability, collisions can be forced by producing the same indexes at -// (most) levels. -func (hb *hashBits) Next(i int) (int, error) { - if hb.consumed+i > len(hb.b)*8 { - // TODO(rvagg): this msg looks like a UnixFS holdover, it's an overflow - // and should probably bubble up a proper Err* - return 0, fmt.Errorf("sharded directory too deep") - } - return hb.next(i), nil -} - -// where 'i' is not '8', we need to read up to two bytes to extract the bits -// for the index. 
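For example, with a bitWidth of 5 and 6 bits of the digest already consumed, only 2 bits remain in the current byte: those 2 bits are masked off and shifted left by 3 to form the high bits of the index, the top 3 bits of the following byte are appended as the low bits, and `consumed` ends up at 11.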
-func (hb *hashBits) next(i int) int { - curbi := hb.consumed / 8 - leftb := 8 - (hb.consumed % 8) - - curb := hb.b[curbi] - if i == leftb { - out := int(mkmask(i) & curb) - hb.consumed += i - return out - } else if i < leftb { - a := curb & mkmask(leftb) // mask out the high bits we don't want - b := a & ^mkmask(leftb-i) // mask out the low bits we don't want - c := b >> uint(leftb-i) // shift whats left down - hb.consumed += i - return int(c) - } else { - out := int(mkmask(leftb) & curb) - out <<= uint(i - leftb) - hb.consumed += leftb - out += hb.next(i - leftb) - return out - } -} - -func defaultHashFunction(val []byte) []byte { - h := murmur3.New64() - _, err := h.Write(val) - if err != nil { - panic(err) // Impossible - } - return h.Sum(nil) -} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/options.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/options.go deleted file mode 100644 index dfc09ff..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/options.go +++ /dev/null @@ -1,60 +0,0 @@ -package hamt - -import "fmt" - -const bucketSize = 3 -const defaultBitWidth = 8 - -type config struct { - bitWidth int - hashFn HashFunc -} - -func defaultConfig() *config { - return &config{ - bitWidth: defaultBitWidth, - hashFn: defaultHashFunction, - } -} - -// Option is a function that configures a HAMT. -type Option func(*config) error - -// UseTreeBitWidth allows you to set a custom bitWidth of the HAMT in bits -// (from 1-8). -// -// Passing in the returned Option to NewNode will generate a new HAMT that uses -// the specified bitWidth. -// -// The default bitWidth is 8. -func UseTreeBitWidth(bitWidth int) Option { - return func(c *config) error { - if bitWidth < 1 { - return fmt.Errorf("configured bitwidth %d below minimum of 1", bitWidth) - } else if bitWidth > 8 { - return fmt.Errorf("configured bitwidth %d exceeds maximum of 8", bitWidth) - } - c.bitWidth = bitWidth - return nil - } -} - -// UseHashFunction allows you to set the hash function used for internal -// indexing by the HAMT. -// -// Passing in the returned Option to NewNode will generate a new HAMT that uses -// the specified hash function. -// -// The default hash function is murmur3-x64 but you should use a -// cryptographically secure function such as SHA2-256 if an attacker may be -// able to pick the keys in order to avoid potential hash collision (tree -// explosion) attacks. 
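A sketch of the option described above: supplying a SHA-256 based HashFunc when creating a node. The Filecoin-style bitWidth of 5 shown alongside it is an assumption of this example, not a requirement.

package example

import (
	"crypto/sha256"

	hamt "github.com/filecoin-project/go-hamt-ipld/v3"
	cbor "github.com/ipfs/go-ipld-cbor"
)

func newSHA256Hamt(store cbor.IpldStore) (*hamt.Node, error) {
	sha := func(b []byte) []byte {
		h := sha256.Sum256(b)
		return h[:] // HashFunc expects a []byte digest
	}
	return hamt.NewNode(store,
		hamt.UseHashFunction(sha),
		hamt.UseTreeBitWidth(5),
	)
}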
-func UseHashFunction(hash HashFunc) Option { - return func(c *config) error { - if hash == nil { - return fmt.Errorf("configured hash function was nil") - } - c.hashFn = hash - return nil - } -} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/pointer_cbor.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/pointer_cbor.go deleted file mode 100644 index d39db84..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/pointer_cbor.go +++ /dev/null @@ -1,103 +0,0 @@ -package hamt - -import ( - "fmt" - "io" - - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" -) - -// implemented as a kinded union - a "Pointer" is either a Link (child node) or -// an Array (bucket) - -func (t *Pointer) MarshalCBOR(w io.Writer) error { - if t.Link != cid.Undef && len(t.KVs) > 0 { - return fmt.Errorf("hamt Pointer cannot have both a link and KVs") - } - - scratch := make([]byte, 9) - - if t.Link != cid.Undef { - if err := cbg.WriteCidBuf(scratch, w, t.Link); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.KVs))); err != nil { - return err - } - - for _, kv := range t.KVs { - if err := kv.MarshalCBOR(w); err != nil { - return err - } - } - } - - return nil -} - -func (t *Pointer) UnmarshalCBOR(br io.Reader) error { - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if maj == cbg.MajTag { - if extra != 42 { - return fmt.Errorf("expected tag 42 for child node link") - } - - ba, err := cbg.ReadByteArray(br, 512) - if err != nil { - return err - } - - c, err := bufToCid(ba) - if err != nil { - return err - } - - t.Link = c - return nil - } else if maj == cbg.MajArray { - length := extra - - if length > 32 { - return fmt.Errorf("KV array in CBOR input for pointer was too long") - } - - t.KVs = make([]*KV, length) - for i := 0; i < int(length); i++ { - var kv KV - if err := kv.UnmarshalCBOR(br); err != nil { - return err - } - - t.KVs[i] = &kv - } - - return nil - } else { - return fmt.Errorf("expected CBOR child node link or array") - } -} - -// from https://github.com/whyrusleeping/cbor-gen/blob/211df3b9e24c6e0d0c338b440e6ab4ab298505b2/utils.go#L530 -func bufToCid(buf []byte) (cid.Cid, error) { - if len(buf) == 0 { - return cid.Undef, fmt.Errorf("undefined CID") - } - - if len(buf) < 2 { - return cid.Undef, fmt.Errorf("DAG-CBOR serialized CIDs must have at least two bytes") - } - - if buf[0] != 0 { - return cid.Undef, fmt.Errorf("DAG-CBOR serialized CIDs must have binary multibase") - } - - return cid.Cast(buf[1:]) -} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/uhamt.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v3/uhamt.go deleted file mode 100644 index 29e6740..0000000 --- a/vendor/github.com/filecoin-project/go-hamt-ipld/v3/uhamt.go +++ /dev/null @@ -1,41 +0,0 @@ -package hamt - -import ( - "math/big" - "math/bits" -) - -// indexForBitPos returns the index within the collapsed array corresponding to -// the given bit in the bitset. The collapsed array contains only one entry -// per bit set in the bitfield, and this function is used to map the indices. -// This is similar to a popcount() operation but is limited to a certain index. -// e.g. a Bitfield of `10010110000` shows that we have a 4 elements in the -// associated array. Indexes `[1]` and `[2]` are not present, but index `[3]` -// is at the second position of our Pointers array. 
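A standalone illustration of the popcount lookup, using math/bits directly rather than the method below; bit positions here follow big.Int.Bit numbering, with bit 0 as the least significant bit.

package example

import (
	"fmt"
	"math/bits"
)

func popcountIndexDemo() {
	bitfield := uint(0b10010110000) // bits set at positions 4, 5, 7 and 10
	// The Pointer for bit position 7 lives at the index given by the number of
	// set bits strictly below position 7: here positions 4 and 5, so index 2.
	idx := bits.OnesCount(bitfield & ((1 << 7) - 1))
	fmt.Println(idx) // 2 -> third entry of the compacted Pointers array
}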
-func (n *Node) indexForBitPos(bp int) int { - return indexForBitPos(bp, n.Bitfield) -} - -func indexForBitPos(bp int, bitfield *big.Int) int { - var x uint - var count, i int - w := bitfield.Bits() - for x = uint(bp); x > bits.UintSize && i < len(w); x -= bits.UintSize { - count += bits.OnesCount(uint(w[i])) - i++ - } - if i == len(w) { - return count - } - return count + bits.OnesCount(uint(w[i])&((1<>bits.TrailingZeros64(uint64(s)) != 127 { - return xerrors.New("unpadded piece size must be a power of 2 multiple of 127") - } - - return nil -} - -func (s PaddedPieceSize) Unpadded() UnpaddedPieceSize { - return UnpaddedPieceSize(s - (s / 128)) -} - -func (s PaddedPieceSize) Validate() error { - if s < 128 { - return xerrors.New("minimum padded piece size is 128 bytes") - } - - if bits.OnesCount64(uint64(s)) != 1 { - return xerrors.New("padded piece size must be a power of 2") - } - - return nil -} - -type PieceInfo struct { - Size PaddedPieceSize // Size in nodes. For BLS12-381 (capacity 254 bits), must be >= 16. (16 * 8 = 128) - PieceCID cid.Cid -} diff --git a/vendor/github.com/filecoin-project/go-state-types/abi/sector.go b/vendor/github.com/filecoin-project/go-state-types/abi/sector.go deleted file mode 100644 index 6be4ecf..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/abi/sector.go +++ /dev/null @@ -1,328 +0,0 @@ -package abi - -import ( - "fmt" - "math" - "strconv" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/big" -) - -// SectorNumber is a numeric identifier for a sector. It is usually relative to a miner. -type SectorNumber uint64 - -func (s SectorNumber) String() string { - return strconv.FormatUint(uint64(s), 10) -} - -// The maximum assignable sector number. -// Raising this would require modifying our AMT implementation. -const MaxSectorNumber = math.MaxInt64 - -// SectorSize indicates one of a set of possible sizes in the network. -// Ideally, SectorSize would be an enum -// type SectorSize enum { -// 1KiB = 1024 -// 1MiB = 1048576 -// 1GiB = 1073741824 -// 1TiB = 1099511627776 -// 1PiB = 1125899906842624 -// 1EiB = 1152921504606846976 -// max = 18446744073709551615 -// } -type SectorSize uint64 - -// Formats the size as a decimal string. -func (s SectorSize) String() string { - return strconv.FormatUint(uint64(s), 10) -} - -// Abbreviates the size as a human-scale number. -// This approximates (truncates) the size unless it is a power of 1024. -func (s SectorSize) ShortString() string { - var biUnits = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - unit := 0 - for s >= 1024 && unit < len(biUnits)-1 { - s /= 1024 - unit++ - } - return fmt.Sprintf("%d%s", s, biUnits[unit]) -} - -type SectorID struct { - Miner ActorID - Number SectorNumber -} - -// The unit of storage power (measured in bytes) -type StoragePower = big.Int - -type SectorQuality = big.Int - -func NewStoragePower(n int64) StoragePower { - return big.NewInt(n) -} - -// These enumerations must match the proofs library and never change. 
-type RegisteredSealProof int64 - -const ( - RegisteredSealProof_StackedDrg2KiBV1 = RegisteredSealProof(0) - RegisteredSealProof_StackedDrg8MiBV1 = RegisteredSealProof(1) - RegisteredSealProof_StackedDrg512MiBV1 = RegisteredSealProof(2) - RegisteredSealProof_StackedDrg32GiBV1 = RegisteredSealProof(3) - RegisteredSealProof_StackedDrg64GiBV1 = RegisteredSealProof(4) - - RegisteredSealProof_StackedDrg2KiBV1_1 = RegisteredSealProof(5) - RegisteredSealProof_StackedDrg8MiBV1_1 = RegisteredSealProof(6) - RegisteredSealProof_StackedDrg512MiBV1_1 = RegisteredSealProof(7) - RegisteredSealProof_StackedDrg32GiBV1_1 = RegisteredSealProof(8) - RegisteredSealProof_StackedDrg64GiBV1_1 = RegisteredSealProof(9) -) - -type RegisteredPoStProof int64 - -const ( - RegisteredPoStProof_StackedDrgWinning2KiBV1 = RegisteredPoStProof(0) - RegisteredPoStProof_StackedDrgWinning8MiBV1 = RegisteredPoStProof(1) - RegisteredPoStProof_StackedDrgWinning512MiBV1 = RegisteredPoStProof(2) - RegisteredPoStProof_StackedDrgWinning32GiBV1 = RegisteredPoStProof(3) - RegisteredPoStProof_StackedDrgWinning64GiBV1 = RegisteredPoStProof(4) - RegisteredPoStProof_StackedDrgWindow2KiBV1 = RegisteredPoStProof(5) - RegisteredPoStProof_StackedDrgWindow8MiBV1 = RegisteredPoStProof(6) - RegisteredPoStProof_StackedDrgWindow512MiBV1 = RegisteredPoStProof(7) - RegisteredPoStProof_StackedDrgWindow32GiBV1 = RegisteredPoStProof(8) - RegisteredPoStProof_StackedDrgWindow64GiBV1 = RegisteredPoStProof(9) -) - -type RegisteredAggregationProof int64 - -const ( - RegisteredAggregationProof_SnarkPackV1 = RegisteredAggregationProof(0) - RegisteredAggregationProof_SnarkPackV2 = RegisteredAggregationProof(1) -) - -type RegisteredUpdateProof int64 - -const ( - RegisteredUpdateProof_StackedDrg2KiBV1 = RegisteredUpdateProof(0) - RegisteredUpdateProof_StackedDrg8MiBV1 = RegisteredUpdateProof(1) - RegisteredUpdateProof_StackedDrg512MiBV1 = RegisteredUpdateProof(2) - RegisteredUpdateProof_StackedDrg32GiBV1 = RegisteredUpdateProof(3) - RegisteredUpdateProof_StackedDrg64GiBV1 = RegisteredUpdateProof(4) -) - -// Metadata about a seal proof type. -type SealProofInfo struct { - // The proof sizes are 192 * the number of "porep" partitions. 
- // https://github.com/filecoin-project/rust-fil-proofs/blob/64390b6fcedb04dd1fdbe43c82b1e91c1439cea2/filecoin-proofs/src/constants.rs#L68-L80 - ProofSize uint64 - SectorSize SectorSize - WinningPoStProof RegisteredPoStProof - WindowPoStProof RegisteredPoStProof - UpdateProof RegisteredUpdateProof -} - -const ( - ss2KiB = 2 << 10 - ss8MiB = 8 << 20 - ss512MiB = 512 << 20 - ss32GiB = 32 << 30 - ss64GiB = 64 << 30 -) - -var SealProofInfos = map[RegisteredSealProof]*SealProofInfo{ - RegisteredSealProof_StackedDrg2KiBV1: { - ProofSize: 192, - SectorSize: ss2KiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning2KiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow2KiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg2KiBV1, - }, - - RegisteredSealProof_StackedDrg8MiBV1: { - ProofSize: 192, - SectorSize: ss8MiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning8MiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow8MiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg8MiBV1, - }, - RegisteredSealProof_StackedDrg512MiBV1: { - ProofSize: 192, - SectorSize: ss512MiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning512MiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow512MiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg512MiBV1, - }, - RegisteredSealProof_StackedDrg32GiBV1: { - ProofSize: 1920, - SectorSize: ss32GiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning32GiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow32GiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg32GiBV1, - }, - RegisteredSealProof_StackedDrg64GiBV1: { - ProofSize: 1920, - SectorSize: ss64GiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning64GiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow64GiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg64GiBV1, - }, - - RegisteredSealProof_StackedDrg2KiBV1_1: { - ProofSize: 192, - SectorSize: ss2KiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning2KiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow2KiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg2KiBV1, - }, - RegisteredSealProof_StackedDrg8MiBV1_1: { - ProofSize: 192, - SectorSize: ss8MiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning8MiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow8MiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg8MiBV1, - }, - RegisteredSealProof_StackedDrg512MiBV1_1: { - ProofSize: 192, - SectorSize: ss512MiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning512MiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow512MiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg512MiBV1, - }, - RegisteredSealProof_StackedDrg32GiBV1_1: { - ProofSize: 1920, - SectorSize: ss32GiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning32GiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow32GiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg32GiBV1, - }, - RegisteredSealProof_StackedDrg64GiBV1_1: { - ProofSize: 1920, - SectorSize: ss64GiB, - WinningPoStProof: RegisteredPoStProof_StackedDrgWinning64GiBV1, - WindowPoStProof: RegisteredPoStProof_StackedDrgWindow64GiBV1, - UpdateProof: RegisteredUpdateProof_StackedDrg64GiBV1, - }, -} - -// ProofSize returns the size of seal proofs for the given sector type. 
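A small sketch of reading seal proof metadata through the accessors defined below; the helper name is an assumption of this example.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func describeSealProof(p abi.RegisteredSealProof) error {
	size, err := p.SectorSize()
	if err != nil {
		return err
	}
	proofLen, err := p.ProofSize()
	if err != nil {
		return err
	}
	// e.g. RegisteredSealProof_StackedDrg32GiBV1 -> "sector=32GiB proof=1920 bytes"
	fmt.Printf("sector=%s proof=%d bytes\n", size.ShortString(), proofLen)
	return nil
}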
-func (p RegisteredSealProof) ProofSize() (uint64, error) { - info, ok := SealProofInfos[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.ProofSize, nil -} - -func (p RegisteredSealProof) SectorSize() (SectorSize, error) { - info, ok := SealProofInfos[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.SectorSize, nil -} - -// RegisteredWinningPoStProof produces the PoSt-specific RegisteredProof corresponding -// to the receiving RegisteredProof. -func (p RegisteredSealProof) RegisteredWinningPoStProof() (RegisteredPoStProof, error) { - info, ok := SealProofInfos[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.WinningPoStProof, nil -} - -// RegisteredWindowPoStProof produces the PoSt-specific RegisteredProof corresponding -// to the receiving RegisteredProof. -func (p RegisteredSealProof) RegisteredWindowPoStProof() (RegisteredPoStProof, error) { - info, ok := SealProofInfos[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.WindowPoStProof, nil -} - -// RegisteredUpdateProof produces the Update-specific RegisteredProof corresponding -// to the receiving RegisteredProof. -func (p RegisteredSealProof) RegisteredUpdateProof() (RegisteredUpdateProof, error) { - info, ok := SealProofInfos[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.UpdateProof, nil -} - -// Metadata about a PoSt proof type. -type PoStProofInfo struct { - SectorSize SectorSize - - // Size of a single proof. - ProofSize uint64 -} - -var PoStProofInfos = map[RegisteredPoStProof]*PoStProofInfo{ - RegisteredPoStProof_StackedDrgWinning2KiBV1: { - SectorSize: ss2KiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWinning8MiBV1: { - SectorSize: ss8MiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWinning512MiBV1: { - SectorSize: ss512MiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWinning32GiBV1: { - SectorSize: ss32GiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWinning64GiBV1: { - SectorSize: ss64GiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWindow2KiBV1: { - SectorSize: ss2KiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWindow8MiBV1: { - SectorSize: ss8MiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWindow512MiBV1: { - SectorSize: ss512MiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWindow32GiBV1: { - SectorSize: ss32GiB, - ProofSize: 192, - }, - RegisteredPoStProof_StackedDrgWindow64GiBV1: { - SectorSize: ss64GiB, - ProofSize: 192, - }, -} - -func (p RegisteredPoStProof) SectorSize() (SectorSize, error) { - info, ok := PoStProofInfos[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.SectorSize, nil -} - -// ProofSize returns the size of window post proofs for the given sector type. 
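For orientation, an illustrative sketch (not part of the diff) of how the proof registry above is typically consumed; the import path matches the vendored package location, and the expected values follow directly from the SealProofInfos table:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	sp := abi.RegisteredSealProof_StackedDrg32GiBV1_1

	// Sector size and proof size come straight from the SealProofInfos table.
	ss, err := sp.SectorSize()
	if err != nil {
		panic(err)
	}
	ps, _ := sp.ProofSize() // 1920 bytes for the 32 GiB proof types

	// Each seal proof also maps to its matching Winning/Window PoSt proof types.
	wp, _ := sp.RegisteredWindowPoStProof()

	fmt.Println(ss, ps, wp) // 34359738368 1920 8
}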
-func (p RegisteredPoStProof) ProofSize() (uint64, error) { - info, ok := PoStProofInfos[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.ProofSize, nil -} - -type SealRandomness Randomness -type InteractiveSealRandomness Randomness -type PoStRandomness Randomness diff --git a/vendor/github.com/filecoin-project/go-state-types/big/int.go b/vendor/github.com/filecoin-project/go-state-types/big/int.go deleted file mode 100644 index 5ba1536..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/big/int.go +++ /dev/null @@ -1,350 +0,0 @@ -package big - -import ( - "encoding/json" - "fmt" - "io" - "math/big" - - cbg "github.com/whyrusleeping/cbor-gen" -) - -// BigIntMaxSerializedLen is the max length of a byte slice representing a CBOR serialized big. -const BigIntMaxSerializedLen = 128 - -type Int struct { - *big.Int -} - -func NewInt(i int64) Int { - return Int{big.NewInt(0).SetInt64(i)} -} - -func NewIntUnsigned(i uint64) Int { - return Int{big.NewInt(0).SetUint64(i)} -} - -func NewFromGo(i *big.Int) Int { - return Int{big.NewInt(0).Set(i)} -} - -func Zero() Int { - return NewInt(0) -} - -// PositiveFromUnsignedBytes interprets b as the bytes of a big-endian unsigned -// integer and returns a positive Int with this absolute value. -func PositiveFromUnsignedBytes(b []byte) Int { - i := big.NewInt(0).SetBytes(b) - return Int{i} -} - -// MustFromString convers dec string into big integer and panics if conversion -// is not sucessful. -func MustFromString(s string) Int { - v, err := FromString(s) - if err != nil { - panic(err) - } - return v -} - -func FromString(s string) (Int, error) { - v, ok := big.NewInt(0).SetString(s, 10) - if !ok { - return Int{}, fmt.Errorf("failed to parse string as a big int") - } - - return Int{v}, nil -} - -func (bi Int) Copy() Int { - return Int{Int: new(big.Int).Set(bi.Int)} -} - -func Product(ints ...Int) Int { - p := NewInt(1) - for _, i := range ints { - p = Mul(p, i) - } - return p -} - -func Mul(a, b Int) Int { - return Int{big.NewInt(0).Mul(a.Int, b.Int)} -} - -func Div(a, b Int) Int { - return Int{big.NewInt(0).Div(a.Int, b.Int)} -} - -func Mod(a, b Int) Int { - return Int{big.NewInt(0).Mod(a.Int, b.Int)} -} - -func Add(a, b Int) Int { - return Int{big.NewInt(0).Add(a.Int, b.Int)} -} - -func Sum(ints ...Int) Int { - sum := Zero() - for _, i := range ints { - sum = Add(sum, i) - } - return sum -} - -func Subtract(num1 Int, ints ...Int) Int { - sub := num1 - for _, i := range ints { - sub = Sub(sub, i) - } - return sub -} - -func Sub(a, b Int) Int { - return Int{big.NewInt(0).Sub(a.Int, b.Int)} -} - -// Returns a**e unless e <= 0 (in which case returns 1). 
-func Exp(a Int, e Int) Int { - return Int{big.NewInt(0).Exp(a.Int, e.Int, nil)} -} - -// Returns x << n -func Lsh(a Int, n uint) Int { - return Int{big.NewInt(0).Lsh(a.Int, n)} -} - -// Returns x >> n -func Rsh(a Int, n uint) Int { - return Int{big.NewInt(0).Rsh(a.Int, n)} -} - -func BitLen(a Int) uint { - return uint(a.Int.BitLen()) -} - -func Max(x, y Int) Int { - // taken from max.Max() - if x.Equals(Zero()) && x.Equals(y) { - if x.Sign() != 0 { - return y - } - return x - } - if x.GreaterThan(y) { - return x - } - return y -} - -func Min(x, y Int) Int { - // taken from max.Min() - if x.Equals(Zero()) && x.Equals(y) { - if x.Sign() != 0 { - return x - } - return y - } - if x.LessThan(y) { - return x - } - return y -} - -func Cmp(a, b Int) int { - return a.Int.Cmp(b.Int) -} - -// LessThan returns true if bi < o -func (bi Int) LessThan(o Int) bool { - return Cmp(bi, o) < 0 -} - -// LessThanEqual returns true if bi <= o -func (bi Int) LessThanEqual(o Int) bool { - return bi.LessThan(o) || bi.Equals(o) -} - -// GreaterThan returns true if bi > o -func (bi Int) GreaterThan(o Int) bool { - return Cmp(bi, o) > 0 -} - -// GreaterThanEqual returns true if bi >= o -func (bi Int) GreaterThanEqual(o Int) bool { - return bi.GreaterThan(o) || bi.Equals(o) -} - -// Neg returns the negative of bi. -func (bi Int) Neg() Int { - return Int{big.NewInt(0).Neg(bi.Int)} -} - -// Abs returns the absolute value of bi. -func (bi Int) Abs() Int { - if bi.GreaterThanEqual(Zero()) { - return bi.Copy() - } - return bi.Neg() -} - -// Equals returns true if bi == o -func (bi Int) Equals(o Int) bool { - return Cmp(bi, o) == 0 -} - -func (bi *Int) MarshalJSON() ([]byte, error) { - if bi.Int == nil { - zero := Zero() - return json.Marshal(zero) - } - return json.Marshal(bi.String()) -} - -func (bi *Int) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - - i, ok := big.NewInt(0).SetString(s, 10) - if !ok { - return fmt.Errorf("failed to parse big string: '%s'", string(b)) - } - - bi.Int = i - return nil -} - -func (bi *Int) Bytes() ([]byte, error) { - if bi.Int == nil { - return []byte{}, fmt.Errorf("failed to convert to bytes, big is nil") - } - - switch { - case bi.Sign() > 0: - return append([]byte{0}, bi.Int.Bytes()...), nil - case bi.Sign() < 0: - return append([]byte{1}, bi.Int.Bytes()...), nil - default: // bi.Sign() == 0: - return []byte{}, nil - } -} - -func FromBytes(buf []byte) (Int, error) { - if len(buf) == 0 { - return NewInt(0), nil - } - - var negative bool - switch buf[0] { - case 0: - negative = false - case 1: - negative = true - default: - return Zero(), fmt.Errorf("big int prefix should be either 0 or 1, got %d", buf[0]) - } - - i := big.NewInt(0).SetBytes(buf[1:]) - if negative { - i.Neg(i) - } - - return Int{i}, nil -} - -func (bi *Int) MarshalBinary() ([]byte, error) { - if bi.Int == nil { - zero := Zero() - return zero.Bytes() - } - return bi.Bytes() -} - -func (bi *Int) UnmarshalBinary(buf []byte) error { - i, err := FromBytes(buf) - if err != nil { - return err - } - - *bi = i - - return nil -} - -func (bi *Int) MarshalCBOR(w io.Writer) error { - if bi.Int == nil { - zero := Zero() - return zero.MarshalCBOR(w) - } - - enc, err := bi.Bytes() - if err != nil { - return err - } - - encLen := len(enc) - if encLen > BigIntMaxSerializedLen { - return fmt.Errorf("big integer byte array too long (%d bytes)", encLen) - } - - header := cbg.CborEncodeMajorType(cbg.MajByteString, uint64(encLen)) - if _, err := w.Write(header); err != nil { - 
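A brief usage sketch for the big.Int helpers above (import path as in the file header); it shows the sign-prefixed encoding produced by Bytes and reversed by FromBytes, plus the non-mutating arithmetic helpers:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

func main() {
	a := big.NewInt(-42)

	// Bytes prefixes the big-endian magnitude with a sign byte:
	// 0 for positive, 1 for negative, and an empty slice for zero.
	raw, err := a.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", raw) // 012a

	back, err := big.FromBytes(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Equals(a)) // true

	// The arithmetic helpers allocate a fresh value instead of mutating inputs.
	fmt.Println(big.Mul(big.NewInt(6), big.NewInt(7))) // 42
}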
return err - } - - if _, err := w.Write(enc); err != nil { - return err - } - - return nil -} - -func (bi *Int) UnmarshalCBOR(br io.Reader) error { - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - - if maj != cbg.MajByteString { - return fmt.Errorf("cbor input for fil big int was not a byte string (%x)", maj) - } - - if extra == 0 { - bi.Int = big.NewInt(0) - return nil - } - - if extra > BigIntMaxSerializedLen { - return fmt.Errorf("big integer byte array too long (%d bytes)", extra) - } - - buf := make([]byte, extra) - if _, err := io.ReadFull(br, buf); err != nil { - return err - } - - i, err := FromBytes(buf) - if err != nil { - return err - } - - *bi = i - - return nil -} - -func (bi *Int) IsZero() bool { - return bi.Int.Sign() == 0 -} - -func (bi *Int) Nil() bool { - return bi.Int == nil -} - -func (bi *Int) NilOrZero() bool { - return bi.Int == nil || bi.Int.Sign() == 0 -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/methods.go deleted file mode 100644 index 426906a..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/methods.go +++ /dev/null @@ -1,120 +0,0 @@ -package builtin - -import ( - "github.com/filecoin-project/go-state-types/abi" -) - -const ( - MethodSend = abi.MethodNum(0) - MethodConstructor = abi.MethodNum(1) -) - -var MethodsAccount = struct { - Constructor abi.MethodNum - PubkeyAddress abi.MethodNum - AuthenticateMessage abi.MethodNum -}{MethodConstructor, 2, 3} - -var MethodsInit = struct { - Constructor abi.MethodNum - Exec abi.MethodNum -}{MethodConstructor, 2} - -var MethodsCron = struct { - Constructor abi.MethodNum - EpochTick abi.MethodNum -}{MethodConstructor, 2} - -var MethodsReward = struct { - Constructor abi.MethodNum - AwardBlockReward abi.MethodNum - ThisEpochReward abi.MethodNum - UpdateNetworkKPI abi.MethodNum -}{MethodConstructor, 2, 3, 4} - -var MethodsMultisig = struct { - Constructor abi.MethodNum - Propose abi.MethodNum - Approve abi.MethodNum - Cancel abi.MethodNum - AddSigner abi.MethodNum - RemoveSigner abi.MethodNum - SwapSigner abi.MethodNum - ChangeNumApprovalsThreshold abi.MethodNum - LockBalance abi.MethodNum -}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9} - -var MethodsPaych = struct { - Constructor abi.MethodNum - UpdateChannelState abi.MethodNum - Settle abi.MethodNum - Collect abi.MethodNum -}{MethodConstructor, 2, 3, 4} - -var MethodsMarket = struct { - Constructor abi.MethodNum - AddBalance abi.MethodNum - WithdrawBalance abi.MethodNum - PublishStorageDeals abi.MethodNum - VerifyDealsForActivation abi.MethodNum - ActivateDeals abi.MethodNum - OnMinerSectorsTerminate abi.MethodNum - ComputeDataCommitment abi.MethodNum - CronTick abi.MethodNum -}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9} - -var MethodsPower = struct { - Constructor abi.MethodNum - CreateMiner abi.MethodNum - UpdateClaimedPower abi.MethodNum - EnrollCronEvent abi.MethodNum - CronTick abi.MethodNum - UpdatePledgeTotal abi.MethodNum - Deprecated1 abi.MethodNum - SubmitPoRepForBulkVerify abi.MethodNum - CurrentTotalPower abi.MethodNum -}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9} - -var MethodsMiner = struct { - Constructor abi.MethodNum - ControlAddresses abi.MethodNum - ChangeWorkerAddress abi.MethodNum - ChangePeerID abi.MethodNum - SubmitWindowedPoSt abi.MethodNum - PreCommitSector abi.MethodNum - ProveCommitSector abi.MethodNum - ExtendSectorExpiration abi.MethodNum - TerminateSectors abi.MethodNum - DeclareFaults 
abi.MethodNum - DeclareFaultsRecovered abi.MethodNum - OnDeferredCronEvent abi.MethodNum - CheckSectorProven abi.MethodNum - ApplyRewards abi.MethodNum - ReportConsensusFault abi.MethodNum - WithdrawBalance abi.MethodNum - ConfirmSectorProofsValid abi.MethodNum - ChangeMultiaddrs abi.MethodNum - CompactPartitions abi.MethodNum - CompactSectorNumbers abi.MethodNum - ConfirmUpdateWorkerKey abi.MethodNum - RepayDebt abi.MethodNum - ChangeOwnerAddress abi.MethodNum - DisputeWindowedPoSt abi.MethodNum - PreCommitSectorBatch abi.MethodNum - ProveCommitAggregate abi.MethodNum - ProveReplicaUpdates abi.MethodNum - PreCommitSectorBatch2 abi.MethodNum - ProveReplicaUpdates2 abi.MethodNum - ChangeBeneficiary abi.MethodNum - GetBeneficiary abi.MethodNum -}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31} - -var MethodsVerifiedRegistry = struct { - Constructor abi.MethodNum - AddVerifier abi.MethodNum - RemoveVerifier abi.MethodNum - AddVerifiedClient abi.MethodNum - UseBytes abi.MethodNum - RestoreBytes abi.MethodNum - RemoveVerifiedClientDataCap abi.MethodNum -}{MethodConstructor, 2, 3, 4, 5, 6, 7} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/network.go b/vendor/github.com/filecoin-project/go-state-types/builtin/network.go deleted file mode 100644 index 8f86e54..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/network.go +++ /dev/null @@ -1,63 +0,0 @@ -package builtin - -import ( - "fmt" - - "github.com/filecoin-project/go-state-types/big" -) - -// PARAM_SPEC -// The duration of a chain epoch. -// Motivation: It guarantees that a block is propagated and WinningPoSt can be successfully done in time all supported miners. -// Usage: It is used for deriving epoch-denominated periods that are more naturally expressed in clock time. -// TODO: In lieu of a real configuration mechanism for this value, we'd like to make it a var so that implementations -// can override it at runtime. Doing so requires changing all the static references to it in this repo to go through -// late-binding function calls, or they'll see the "wrong" value. -// https://github.com/filecoin-project/specs-actors/issues/353 -// If EpochDurationSeconds is changed, update `BaselineExponent`, `lambda`, and // `expLamSubOne` in ./reward/reward_logic.go -// You can re-calculate these constants by changing the epoch duration in ./reward/reward_calc.py and running it. -const EpochDurationSeconds = 30 -const SecondsInHour = 60 * 60 -const SecondsInDay = 24 * SecondsInHour -const EpochsInHour = SecondsInHour / EpochDurationSeconds -const EpochsInDay = 24 * EpochsInHour -const EpochsInYear = 365 * EpochsInDay - -// PARAM_SPEC -// Expected number of block quality in an epoch (e.g. 1 block with block quality 5, or 5 blocks with quality 1) -// Motivation: It ensures that there is enough on-chain throughput -// Usage: It is used to calculate the block reward. -var ExpectedLeadersPerEpoch = int64(5) - -func init() { - //noinspection GoBoolExpressions - if SecondsInHour%EpochDurationSeconds != 0 { - // This even division is an assumption that other code might unwittingly make. - // Don't rely on it on purpose, though. - // While we're pretty sure everything will still work fine, we're safer maintaining this invariant anyway. - panic(fmt.Sprintf("epoch duration %d does not evenly divide one hour (%d)", EpochDurationSeconds, SecondsInHour)) - } -} - -// Number of token units in an abstract "FIL" token. 
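The method tables above rely on positional struct literals, so each exported field is a stable abi.MethodNum; a small sketch of reading one, together with the epoch constants from network.go (values follow from EpochDurationSeconds = 30):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/builtin"
)

func main() {
	// Fields are numbered 1..N in declaration order, so SubmitWindowedPoSt is
	// method number 5 on the miner actor.
	fmt.Println(builtin.MethodsMiner.SubmitWindowedPoSt) // 5

	// 30-second epochs give 120 epochs per hour and 2880 per day.
	fmt.Println(builtin.EpochsInHour, builtin.EpochsInDay) // 120 2880
}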
-// The network works purely in the indivisible token amounts. This constant converts to a fixed decimal with more -// human-friendly scale. -var TokenPrecision = big.NewIntUnsigned(1_000_000_000_000_000_000) - -// The maximum supply of Filecoin that will ever exist (in token units) -var TotalFilecoin = big.Mul(big.NewIntUnsigned(2_000_000_000), TokenPrecision) - -// Quality multiplier for committed capacity (no deals) in a sector -var QualityBaseMultiplier = big.NewInt(10) - -// Quality multiplier for unverified deals in a sector -var DealWeightMultiplier = big.NewInt(10) - -// Quality multiplier for verified deals in a sector -var VerifiedDealWeightMultiplier = big.NewInt(100) - -// Precision used for making QA power calculations -const SectorQualityPrecision = 20 - -// 1 NanoFIL -var OneNanoFIL = big.NewInt(1_000_000_000) diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/quantize.go b/vendor/github.com/filecoin-project/go-state-types/builtin/quantize.go deleted file mode 100644 index 6e085a2..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/quantize.go +++ /dev/null @@ -1,13 +0,0 @@ -package builtin - -import "github.com/filecoin-project/go-state-types/abi" - -// A spec for quantization. -type QuantSpec struct { - unit abi.ChainEpoch // The unit of quantization - offset abi.ChainEpoch // The offset from zero from which to base the modulus -} - -func NewQuantSpec(unit, offset abi.ChainEpoch) QuantSpec { - return QuantSpec{unit: unit, offset: offset} -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/sector.go b/vendor/github.com/filecoin-project/go-state-types/builtin/sector.go deleted file mode 100644 index f969f83..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/sector.go +++ /dev/null @@ -1,130 +0,0 @@ -package builtin - -import ( - stabi "github.com/filecoin-project/go-state-types/abi" - "golang.org/x/xerrors" -) - -// Policy values associated with a seal proof type. 
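A short sketch of scaling whole FIL into the indivisible on-chain units using TokenPrecision and OneNanoFIL defined above (big and builtin import paths as in the vendored tree):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin"
)

func main() {
	// The chain accounts only in indivisible units; TokenPrecision (1e18) is the
	// scale factor between those units and one whole FIL.
	fiveFIL := big.Mul(big.NewInt(5), builtin.TokenPrecision)
	fmt.Println(fiveFIL) // 5000000000000000000

	// One nanoFIL expressed in the same units.
	fmt.Println(builtin.OneNanoFIL) // 1000000000
}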
-type SealProofPolicy struct { - SectorMaxLifetime stabi.ChainEpoch -} - -// For V1 Stacked DRG sectors, the max is 540 days since Network Version 11 -// according to https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0014.md -const EpochsIn540Days = stabi.ChainEpoch(540 * EpochsInDay) - -// For V1_1 Stacked DRG sectors, the max is 5 years -const EpochsInFiveYears = stabi.ChainEpoch(5 * EpochsInYear) - -// 540-day maximum life time setting for V1 since network version 11 -var SealProofPoliciesV11 = map[stabi.RegisteredSealProof]*SealProofPolicy{ - stabi.RegisteredSealProof_StackedDrg2KiBV1: { - SectorMaxLifetime: EpochsIn540Days, - }, - stabi.RegisteredSealProof_StackedDrg8MiBV1: { - SectorMaxLifetime: EpochsIn540Days, - }, - stabi.RegisteredSealProof_StackedDrg512MiBV1: { - SectorMaxLifetime: EpochsIn540Days, - }, - stabi.RegisteredSealProof_StackedDrg32GiBV1: { - SectorMaxLifetime: EpochsIn540Days, - }, - stabi.RegisteredSealProof_StackedDrg64GiBV1: { - SectorMaxLifetime: EpochsIn540Days, - }, - - stabi.RegisteredSealProof_StackedDrg2KiBV1_1: { - SectorMaxLifetime: EpochsInFiveYears, - }, - stabi.RegisteredSealProof_StackedDrg8MiBV1_1: { - SectorMaxLifetime: EpochsInFiveYears, - }, - stabi.RegisteredSealProof_StackedDrg512MiBV1_1: { - SectorMaxLifetime: EpochsInFiveYears, - }, - stabi.RegisteredSealProof_StackedDrg32GiBV1_1: { - SectorMaxLifetime: EpochsInFiveYears, - }, - stabi.RegisteredSealProof_StackedDrg64GiBV1_1: { - SectorMaxLifetime: EpochsInFiveYears, - }, -} - -// Returns the partition size, in sectors, associated with a seal proof type. -// The partition size is the number of sectors proved in a single PoSt proof. -func SealProofWindowPoStPartitionSectors(p stabi.RegisteredSealProof) (uint64, error) { - wPoStProofType, err := p.RegisteredWindowPoStProof() - if err != nil { - return 0, err - } - return PoStProofWindowPoStPartitionSectors(wPoStProofType) -} - -// SectorMaximumLifetime is the maximum duration a sector sealed with this proof may exist between activation and expiration -func SealProofSectorMaximumLifetime(p stabi.RegisteredSealProof) (stabi.ChainEpoch, error) { - info, ok := SealProofPoliciesV11[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.SectorMaxLifetime, nil -} - -// The minimum power of an individual miner to meet the threshold for leader election (in bytes). -// Motivation: -// - Limits sybil generation -// - Improves consensus fault detection -// - Guarantees a minimum fee for consensus faults -// - Ensures that a specific soundness for the power table -// Note: We may be able to reduce this in the future, addressing consensus faults with more complicated penalties, -// sybil generation with crypto-economic mechanism, and PoSt soundness by increasing the challenges for small miners. -func ConsensusMinerMinPower(p stabi.RegisteredPoStProof) (stabi.StoragePower, error) { - info, ok := PoStProofPolicies[p] - if !ok { - return stabi.NewStoragePower(0), xerrors.Errorf("unsupported proof type: %v", p) - } - return info.ConsensusMinerMinPower, nil -} - -// Policy values associated with a PoSt proof type. -type PoStProofPolicy struct { - WindowPoStPartitionSectors uint64 - ConsensusMinerMinPower stabi.StoragePower -} - -// Partition sizes must match those used by the proofs library. 
-// See https://github.com/filecoin-project/rust-fil-proofs/blob/master/filecoin-proofs/src/constants.rs#L85 -var PoStProofPolicies = map[stabi.RegisteredPoStProof]*PoStProofPolicy{ - stabi.RegisteredPoStProof_StackedDrgWindow2KiBV1: { - WindowPoStPartitionSectors: 2, - ConsensusMinerMinPower: stabi.NewStoragePower(10 << 40), - }, - stabi.RegisteredPoStProof_StackedDrgWindow8MiBV1: { - WindowPoStPartitionSectors: 2, - ConsensusMinerMinPower: stabi.NewStoragePower(10 << 40), - }, - stabi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { - WindowPoStPartitionSectors: 2, - ConsensusMinerMinPower: stabi.NewStoragePower(10 << 40), - }, - stabi.RegisteredPoStProof_StackedDrgWindow32GiBV1: { - WindowPoStPartitionSectors: 2349, - ConsensusMinerMinPower: stabi.NewStoragePower(10 << 40), - }, - stabi.RegisteredPoStProof_StackedDrgWindow64GiBV1: { - WindowPoStPartitionSectors: 2300, - ConsensusMinerMinPower: stabi.NewStoragePower(10 << 40), - }, - // Winning PoSt proof types omitted. -} - -// Returns the partition size, in sectors, associated with a Window PoSt proof type. -// The partition size is the number of sectors proved in a single PoSt proof. -func PoStProofWindowPoStPartitionSectors(p stabi.RegisteredPoStProof) (uint64, error) { - info, ok := PoStProofPolicies[p] - if !ok { - return 0, xerrors.Errorf("unsupported proof type: %v", p) - } - return info.WindowPoStPartitionSectors, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/shared.go b/vendor/github.com/filecoin-project/go-state-types/builtin/shared.go deleted file mode 100644 index 3b9f247..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/shared.go +++ /dev/null @@ -1,16 +0,0 @@ -package builtin - -import ( - "github.com/filecoin-project/go-state-types/big" -) - -///// Code shared by multiple built-in actors. ///// - -// Default log2 of branching factor for HAMTs. -// This value has been empirically chosen, but the optimal value for maps with different mutation profiles may differ. -const DefaultHamtBitwidth = 5 - -type BigFrac struct { - Numerator big.Int - Denominator big.Int -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/singletons.go b/vendor/github.com/filecoin-project/go-state-types/builtin/singletons.go deleted file mode 100644 index f98c4de..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/singletons.go +++ /dev/null @@ -1,29 +0,0 @@ -package builtin - -import ( - addr "github.com/filecoin-project/go-address" -) - -// Addresses for singleton system actors. -var ( - // Distinguished AccountActor that is the source of system implicit messages. - SystemActorAddr = mustMakeAddress(0) - InitActorAddr = mustMakeAddress(1) - RewardActorAddr = mustMakeAddress(2) - CronActorAddr = mustMakeAddress(3) - StoragePowerActorAddr = mustMakeAddress(4) - StorageMarketActorAddr = mustMakeAddress(5) - VerifiedRegistryActorAddr = mustMakeAddress(6) - // Distinguished AccountActor that is the destination of all burnt funds. 
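A sketch of the policy lookups defined in sector.go above; the expected values (five-year V1_1 lifetime, 2349 sectors per 32 GiB Window PoSt partition, 10 TiB minimum consensus power) come from the tables shown:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin"
)

func main() {
	sp := abi.RegisteredSealProof_StackedDrg32GiBV1_1

	// V1_1 sectors may live up to five years between activation and expiration.
	life, err := builtin.SealProofSectorMaximumLifetime(sp)
	if err != nil {
		panic(err)
	}

	// Partition size of the matching Window PoSt proof type, in sectors.
	partition, err := builtin.SealProofWindowPoStPartitionSectors(sp)
	if err != nil {
		panic(err)
	}

	// Minimum raw power for leader election: 10 << 40 bytes (10 TiB).
	minPower, err := builtin.ConsensusMinerMinPower(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
	if err != nil {
		panic(err)
	}

	fmt.Println(life, partition) // 5256000 2349
	fmt.Println(minPower)        // 10995116277760
}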
- BurntFundsActorAddr = mustMakeAddress(99) -) - -const FirstNonSingletonActorId = 100 - -func mustMakeAddress(id uint64) addr.Address { - address, err := addr.NewIDAddress(id) - if err != nil { - panic(err) - } - return address -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/cbor_gen.go deleted file mode 100644 index 4894ef8..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/cbor_gen.go +++ /dev/null @@ -1,1828 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package market - -import ( - "fmt" - "io" - - abi "github.com/filecoin-project/go-state-types/abi" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufState = []byte{139} - -func (t *State) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Proposals (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Proposals); err != nil { - return xerrors.Errorf("failed to write cid field t.Proposals: %w", err) - } - - // t.States (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.States); err != nil { - return xerrors.Errorf("failed to write cid field t.States: %w", err) - } - - // t.PendingProposals (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PendingProposals); err != nil { - return xerrors.Errorf("failed to write cid field t.PendingProposals: %w", err) - } - - // t.EscrowTable (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.EscrowTable); err != nil { - return xerrors.Errorf("failed to write cid field t.EscrowTable: %w", err) - } - - // t.LockedTable (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.LockedTable); err != nil { - return xerrors.Errorf("failed to write cid field t.LockedTable: %w", err) - } - - // t.NextID (abi.DealID) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NextID)); err != nil { - return err - } - - // t.DealOpsByEpoch (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.DealOpsByEpoch); err != nil { - return xerrors.Errorf("failed to write cid field t.DealOpsByEpoch: %w", err) - } - - // t.LastCron (abi.ChainEpoch) (int64) - if t.LastCron >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LastCron)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.LastCron-1)); err != nil { - return err - } - } - - // t.TotalClientLockedCollateral (big.Int) (struct) - if err := t.TotalClientLockedCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalProviderLockedCollateral (big.Int) (struct) - if err := t.TotalProviderLockedCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalClientStorageFee (big.Int) (struct) - if err := t.TotalClientStorageFee.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *State) UnmarshalCBOR(r io.Reader) error { - *t = State{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 11 { - return fmt.Errorf("cbor input had 
wrong number of fields") - } - - // t.Proposals (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Proposals: %w", err) - } - - t.Proposals = c - - } - // t.States (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.States: %w", err) - } - - t.States = c - - } - // t.PendingProposals (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PendingProposals: %w", err) - } - - t.PendingProposals = c - - } - // t.EscrowTable (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.EscrowTable: %w", err) - } - - t.EscrowTable = c - - } - // t.LockedTable (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.LockedTable: %w", err) - } - - t.LockedTable = c - - } - // t.NextID (abi.DealID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.NextID = abi.DealID(extra) - - } - // t.DealOpsByEpoch (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.DealOpsByEpoch: %w", err) - } - - t.DealOpsByEpoch = c - - } - // t.LastCron (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.LastCron = abi.ChainEpoch(extraI) - } - // t.TotalClientLockedCollateral (big.Int) (struct) - - { - - if err := t.TotalClientLockedCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalClientLockedCollateral: %w", err) - } - - } - // t.TotalProviderLockedCollateral (big.Int) (struct) - - { - - if err := t.TotalProviderLockedCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalProviderLockedCollateral: %w", err) - } - - } - // t.TotalClientStorageFee (big.Int) (struct) - - { - - if err := t.TotalClientStorageFee.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalClientStorageFee: %w", err) - } - - } - return nil -} - -var lengthBufDealState = []byte{131} - -func (t *DealState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDealState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorStartEpoch (abi.ChainEpoch) (int64) - if t.SectorStartEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorStartEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorStartEpoch-1)); err != nil { - return err - } - } - - // t.LastUpdatedEpoch (abi.ChainEpoch) (int64) - if t.LastUpdatedEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LastUpdatedEpoch)); err != nil { - return err 
- } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.LastUpdatedEpoch-1)); err != nil { - return err - } - } - - // t.SlashEpoch (abi.ChainEpoch) (int64) - if t.SlashEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *DealState) UnmarshalCBOR(r io.Reader) error { - *t = DealState{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorStartEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SectorStartEpoch = abi.ChainEpoch(extraI) - } - // t.LastUpdatedEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.LastUpdatedEpoch = abi.ChainEpoch(extraI) - } - // t.SlashEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SlashEpoch = abi.ChainEpoch(extraI) - } - return nil -} - -var lengthBufWithdrawBalanceParams = []byte{130} - -func (t *WithdrawBalanceParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWithdrawBalanceParams); err != nil { - return err - } - - // t.ProviderOrClientAddress (address.Address) (struct) - if err := t.ProviderOrClientAddress.MarshalCBOR(w); err != nil { - return err - } - - // t.Amount (big.Int) (struct) - if err := t.Amount.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *WithdrawBalanceParams) UnmarshalCBOR(r io.Reader) error { - *t = WithdrawBalanceParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number 
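A round-trip sketch for the generated DealState codec above. The field names and types are taken from the marshalling code; it is assumed here that DealState carries exactly the three abi.ChainEpoch fields the codec writes:

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-state-types/builtin/v8/market"
)

func main() {
	in := market.DealState{
		SectorStartEpoch: 100,
		LastUpdatedEpoch: -1, // negative epochs are written as CBOR major type 1
		SlashEpoch:       -1,
	}

	// MarshalCBOR emits a fixed 3-element CBOR array (lengthBufDealState = 0x83).
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	var out market.DealState
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.SectorStartEpoch, out.LastUpdatedEpoch, out.SlashEpoch) // 100 -1 -1
}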
of fields") - } - - // t.ProviderOrClientAddress (address.Address) (struct) - - { - - if err := t.ProviderOrClientAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ProviderOrClientAddress: %w", err) - } - - } - // t.Amount (big.Int) (struct) - - { - - if err := t.Amount.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Amount: %w", err) - } - - } - return nil -} - -var lengthBufPublishStorageDealsParams = []byte{129} - -func (t *PublishStorageDealsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPublishStorageDealsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deals ([]market.ClientDealProposal) (slice) - if len(t.Deals) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Deals was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Deals))); err != nil { - return err - } - for _, v := range t.Deals { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *PublishStorageDealsParams) UnmarshalCBOR(r io.Reader) error { - *t = PublishStorageDealsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deals ([]market.ClientDealProposal) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Deals: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Deals = make([]ClientDealProposal, extra) - } - - for i := 0; i < int(extra); i++ { - - var v ClientDealProposal - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Deals[i] = v - } - - return nil -} - -var lengthBufPublishStorageDealsReturn = []byte{130} - -func (t *PublishStorageDealsReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPublishStorageDealsReturn); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.IDs ([]abi.DealID) (slice) - if len(t.IDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.IDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.IDs))); err != nil { - return err - } - for _, v := range t.IDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.ValidDeals (bitfield.BitField) (struct) - if err := t.ValidDeals.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *PublishStorageDealsReturn) UnmarshalCBOR(r io.Reader) error { - *t = PublishStorageDealsReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.IDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return 
fmt.Errorf("t.IDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.IDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.IDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.IDs was not a uint, instead got %d", maj) - } - - t.IDs[i] = abi.DealID(val) - } - - // t.ValidDeals (bitfield.BitField) (struct) - - { - - if err := t.ValidDeals.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ValidDeals: %w", err) - } - - } - return nil -} - -var lengthBufActivateDealsParams = []byte{130} - -func (t *ActivateDealsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufActivateDealsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.SectorExpiry (abi.ChainEpoch) (int64) - if t.SectorExpiry >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorExpiry)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorExpiry-1)); err != nil { - return err - } - } - return nil -} - -func (t *ActivateDealsParams) UnmarshalCBOR(r io.Reader) error { - *t = ActivateDealsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.SectorExpiry (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SectorExpiry = abi.ChainEpoch(extraI) - } - return nil -} - -var 
lengthBufVerifyDealsForActivationParams = []byte{129} - -func (t *VerifyDealsForActivationParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufVerifyDealsForActivationParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Sectors ([]market.SectorDeals) (slice) - if len(t.Sectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Sectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sectors))); err != nil { - return err - } - for _, v := range t.Sectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *VerifyDealsForActivationParams) UnmarshalCBOR(r io.Reader) error { - *t = VerifyDealsForActivationParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sectors ([]market.SectorDeals) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Sectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Sectors = make([]SectorDeals, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorDeals - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Sectors[i] = v - } - - return nil -} - -var lengthBufVerifyDealsForActivationReturn = []byte{129} - -func (t *VerifyDealsForActivationReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufVerifyDealsForActivationReturn); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Sectors ([]market.SectorWeights) (slice) - if len(t.Sectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Sectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sectors))); err != nil { - return err - } - for _, v := range t.Sectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *VerifyDealsForActivationReturn) UnmarshalCBOR(r io.Reader) error { - *t = VerifyDealsForActivationReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sectors ([]market.SectorWeights) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Sectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Sectors = make([]SectorWeights, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorWeights - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Sectors[i] = v - } - - return nil -} - -var lengthBufComputeDataCommitmentParams = []byte{129} - -func (t *ComputeDataCommitmentParams) MarshalCBOR(w io.Writer) error 
{ - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufComputeDataCommitmentParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Inputs ([]*market.SectorDataSpec) (slice) - if len(t.Inputs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Inputs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Inputs))); err != nil { - return err - } - for _, v := range t.Inputs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ComputeDataCommitmentParams) UnmarshalCBOR(r io.Reader) error { - *t = ComputeDataCommitmentParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Inputs ([]*market.SectorDataSpec) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Inputs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Inputs = make([]*SectorDataSpec, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorDataSpec - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Inputs[i] = &v - } - - return nil -} - -var lengthBufComputeDataCommitmentReturn = []byte{129} - -func (t *ComputeDataCommitmentReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufComputeDataCommitmentReturn); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.CommDs ([]typegen.CborCid) (slice) - if len(t.CommDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.CommDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.CommDs))); err != nil { - return err - } - for _, v := range t.CommDs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ComputeDataCommitmentReturn) UnmarshalCBOR(r io.Reader) error { - *t = ComputeDataCommitmentReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.CommDs ([]typegen.CborCid) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.CommDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.CommDs = make([]cbg.CborCid, extra) - } - - for i := 0; i < int(extra); i++ { - - var v cbg.CborCid - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.CommDs[i] = v - } - - return nil -} - -var lengthBufOnMinerSectorsTerminateParams = []byte{130} - -func (t *OnMinerSectorsTerminateParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufOnMinerSectorsTerminateParams); err != nil { - return err - } - 
- scratch := make([]byte, 9) - - // t.Epoch (abi.ChainEpoch) (int64) - if t.Epoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { - return err - } - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - return nil -} - -func (t *OnMinerSectorsTerminateParams) UnmarshalCBOR(r io.Reader) error { - *t = OnMinerSectorsTerminateParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Epoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Epoch = abi.ChainEpoch(extraI) - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - return nil -} - -var lengthBufDealProposal = []byte{139} - -func (t *DealProposal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDealProposal); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.PieceCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PieceCID); err != nil { - return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) - } - - // t.PieceSize (abi.PaddedPieceSize) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { - return err - } - - // t.VerifiedDeal (bool) (bool) - if err := cbg.WriteBool(w, t.VerifiedDeal); err != nil { - return err - } - - // t.Client (address.Address) (struct) - if err := t.Client.MarshalCBOR(w); err != nil { - return err - } - - // t.Provider (address.Address) (struct) - if err := t.Provider.MarshalCBOR(w); err != nil { - return err - } - - // t.Label (market.DealLabel) (struct) - if err := 
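Every int64 field in these generated marshallers uses the same branch: non-negative values go out as CBOR major type 0, negative values as major type 1 carrying -x-1. A minimal illustration with the cbor-gen helpers this file already imports (the -1 epoch sentinel encodes as the single byte 0x20):

package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	scratch := make([]byte, 9)
	var buf bytes.Buffer

	epoch := int64(-1) // e.g. an "unset" ChainEpoch sentinel
	if epoch >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, &buf, cbg.MajUnsignedInt, uint64(epoch)); err != nil {
			panic(err)
		}
	} else {
		// -1 maps to major type 1 with argument -(-1)-1 = 0, i.e. the byte 0x20.
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, &buf, cbg.MajNegativeInt, uint64(-epoch-1)); err != nil {
			panic(err)
		}
	}

	fmt.Printf("%x\n", buf.Bytes()) // 20
}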
t.Label.MarshalCBOR(w); err != nil { - return err - } - - // t.StartEpoch (abi.ChainEpoch) (int64) - if t.StartEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - - // t.EndEpoch (abi.ChainEpoch) (int64) - if t.EndEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { - return err - } - } - - // t.StoragePricePerEpoch (big.Int) (struct) - if err := t.StoragePricePerEpoch.MarshalCBOR(w); err != nil { - return err - } - - // t.ProviderCollateral (big.Int) (struct) - if err := t.ProviderCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.ClientCollateral (big.Int) (struct) - if err := t.ClientCollateral.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *DealProposal) UnmarshalCBOR(r io.Reader) error { - *t = DealProposal{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 11 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PieceCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) - } - - t.PieceCID = c - - } - // t.PieceSize (abi.PaddedPieceSize) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PieceSize = abi.PaddedPieceSize(extra) - - } - // t.VerifiedDeal (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.VerifiedDeal = false - case 21: - t.VerifiedDeal = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Client (address.Address) (struct) - - { - - if err := t.Client.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Client: %w", err) - } - - } - // t.Provider (address.Address) (struct) - - { - - if err := t.Provider.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Provider: %w", err) - } - - } - // t.Label (market.DealLabel) (struct) - - { - - if err := t.Label.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Label: %w", err) - } - - } - // t.StartEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - // t.EndEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := 
cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EndEpoch = abi.ChainEpoch(extraI) - } - // t.StoragePricePerEpoch (big.Int) (struct) - - { - - if err := t.StoragePricePerEpoch.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.StoragePricePerEpoch: %w", err) - } - - } - // t.ProviderCollateral (big.Int) (struct) - - { - - if err := t.ProviderCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ProviderCollateral: %w", err) - } - - } - // t.ClientCollateral (big.Int) (struct) - - { - - if err := t.ClientCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ClientCollateral: %w", err) - } - - } - return nil -} - -var lengthBufClientDealProposal = []byte{130} - -func (t *ClientDealProposal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufClientDealProposal); err != nil { - return err - } - - // t.Proposal (market.DealProposal) (struct) - if err := t.Proposal.MarshalCBOR(w); err != nil { - return err - } - - // t.ClientSignature (crypto.Signature) (struct) - if err := t.ClientSignature.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *ClientDealProposal) UnmarshalCBOR(r io.Reader) error { - *t = ClientDealProposal{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Proposal (market.DealProposal) (struct) - - { - - if err := t.Proposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Proposal: %w", err) - } - - } - // t.ClientSignature (crypto.Signature) (struct) - - { - - if err := t.ClientSignature.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ClientSignature: %w", err) - } - - } - return nil -} - -var lengthBufSectorDeals = []byte{130} - -func (t *SectorDeals) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorDeals); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorExpiry (abi.ChainEpoch) (int64) - if t.SectorExpiry >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorExpiry)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorExpiry-1)); err != nil { - return err - } - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - return nil -} - -func (t *SectorDeals) UnmarshalCBOR(r io.Reader) error { - *t = 
SectorDeals{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorExpiry (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SectorExpiry = abi.ChainEpoch(extraI) - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - return nil -} - -var lengthBufSectorWeights = []byte{131} - -func (t *SectorWeights) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorWeights); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.DealSpace (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealSpace)); err != nil { - return err - } - - // t.DealWeight (big.Int) (struct) - if err := t.DealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.VerifiedDealWeight (big.Int) (struct) - if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *SectorWeights) UnmarshalCBOR(r io.Reader) error { - *t = SectorWeights{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealSpace (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealSpace = uint64(extra) - - } - // t.DealWeight (big.Int) (struct) - - { - - if err := t.DealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) - } - - } - // t.VerifiedDealWeight (big.Int) (struct) - - { - - if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) - } - - } - return nil -} - -var lengthBufSectorDataSpec = []byte{130} - -func (t *SectorDataSpec) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - 
return err - } - if _, err := w.Write(lengthBufSectorDataSpec); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.SectorType (abi.RegisteredSealProof) (int64) - if t.SectorType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorType-1)); err != nil { - return err - } - } - return nil -} - -func (t *SectorDataSpec) UnmarshalCBOR(r io.Reader) error { - *t = SectorDataSpec{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.SectorType (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SectorType = abi.RegisteredSealProof(extraI) - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/deal.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/deal.go deleted file mode 100644 index c6c2da2..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/deal.go +++ /dev/null @@ -1,248 +0,0 @@ -package market - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "unicode/utf8" - - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - acrypto "github.com/filecoin-project/go-state-types/crypto" - mh "github.com/multiformats/go-multihash" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/ipfs/go-cid" -) - -var PieceCIDPrefix = cid.Prefix{ - Version: 1, - Codec: cid.FilCommitmentUnsealed, - MhType: mh.SHA2_256_TRUNC254_PADDED, - MhLength: 32, -} - 
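Every ChainEpoch / int64 field in the generated marshalers above is written with the same branch: a non-negative value goes out as CBOR major type 0 (unsigned int), a negative value as major type 1 carrying the magnitude -v-1, and the matching UnmarshalCBOR branch undoes it with extraI = -1 - extraI. Below is a minimal standalone sketch of that convention only, assuming nothing from the vendored cbor-gen package: the helpers writeInt64/readInt64 are illustrative names, and the 8-byte header form (additional info 27) is used throughout for brevity.

// Sketch of the CBOR signed-integer convention used by the generated
// marshalers above; not the vendored cbg code.
package main

import (
	"encoding/binary"
	"fmt"
)

// writeInt64 emits a (non-canonical) CBOR header in the 8-byte form:
// major type 0 for v >= 0, major type 1 carrying -v-1 otherwise.
func writeInt64(v int64) []byte {
	major, u := byte(0), uint64(v)
	if v < 0 {
		major, u = 1, uint64(-v-1)
	}
	buf := make([]byte, 9)
	buf[0] = major<<5 | 27 // additional info 27: 8-byte argument follows
	binary.BigEndian.PutUint64(buf[1:], u)
	return buf
}

// readInt64 reverses the mapping, mirroring the "extraI = -1 - extraI"
// branch in the generated UnmarshalCBOR functions.
func readInt64(b []byte) (int64, error) {
	u := binary.BigEndian.Uint64(b[1:9])
	switch b[0] >> 5 {
	case 0:
		return int64(u), nil
	case 1:
		return -1 - int64(u), nil
	default:
		return 0, fmt.Errorf("wrong type for int64 field: %d", b[0]>>5)
	}
}

func main() {
	for _, epoch := range []int64{0, 42, -1, -181440} {
		got, _ := readInt64(writeInt64(epoch))
		fmt.Println(epoch, "->", got) // round-trips unchanged
	}
}

The generated code additionally rejects decoded magnitudes that do not fit in int64 (the "int64 positive/negative overflow" errors above); the sketch omits that guard for clarity.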
-type DealState struct { - SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector - LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated - SlashEpoch abi.ChainEpoch // -1 if deal never slashed -} - -// The DealLabel is a kinded union of string or byte slice. -// It serializes to a CBOR string or CBOR byte string depending on which form it takes. -// The zero value is serialized as an empty CBOR string (maj type 3). -type DealLabel struct { - bs []byte - notString bool -} - -// Zero value of DealLabel is canonical EmptyDealLabel -var EmptyDealLabel = DealLabel{} - -func NewLabelFromString(s string) (DealLabel, error) { - if len(s) > DealMaxLabelSize { - return EmptyDealLabel, xerrors.Errorf("provided string is too large to be a label (%d), max length (%d)", len(s), DealMaxLabelSize) - } - if !utf8.ValidString(s) { - return EmptyDealLabel, xerrors.Errorf("provided string is invalid utf8") - } - return DealLabel{ - bs: []byte(s), - notString: false, - }, nil -} - -func NewLabelFromBytes(b []byte) (DealLabel, error) { - if len(b) > DealMaxLabelSize { - return EmptyDealLabel, xerrors.Errorf("provided bytes are too large to be a label (%d), max length (%d)", len(b), DealMaxLabelSize) - } - - return DealLabel{ - bs: b, - notString: true, - }, nil -} - -func (label DealLabel) IsString() bool { - return !label.notString -} - -func (label DealLabel) IsBytes() bool { - return label.notString -} - -func (label DealLabel) ToString() (string, error) { - if !label.IsString() { - return "", xerrors.Errorf("label is not string") - } - - return string(label.bs), nil -} - -func (label DealLabel) ToBytes() ([]byte, error) { - if !label.IsBytes() { - return nil, xerrors.Errorf("label is not bytes") - } - return label.bs, nil -} - -func (label DealLabel) Length() int { - return len(label.bs) -} - -func (l DealLabel) Equals(o DealLabel) bool { - return bytes.Equal(l.bs, o.bs) && l.notString == o.notString -} - -func (label *DealLabel) MarshalCBOR(w io.Writer) error { - scratch := make([]byte, 9) - - // nil *DealLabel counts as EmptyLabel - // on chain structures should never have a pointer to a DealLabel but the case is included for completeness - if label == nil { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 0); err != nil { - return err - } - _, err := io.WriteString(w, string("")) - return err - } - if len(label.bs) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("label is too long to marshal (%d), max allowed (%d)", len(label.bs), cbg.ByteArrayMaxLen) - } - - majorType := byte(cbg.MajByteString) - if label.IsString() { - majorType = cbg.MajTextString - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, majorType, uint64(len(label.bs))); err != nil { - return err - } - _, err := w.Write(label.bs) - return err -} - -func (label *DealLabel) UnmarshalCBOR(br io.Reader) error { - if label == nil { - return xerrors.Errorf("cannot unmarshal into nil pointer") - } - - // reset fields - label.bs = nil - - scratch := make([]byte, 8) - - maj, length, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajTextString && maj != cbg.MajByteString { - return fmt.Errorf("unexpected major tag (%d) when unmarshaling DealLabel: only textString (%d) or byteString (%d) expected", maj, cbg.MajTextString, cbg.MajByteString) - } - if length > cbg.ByteArrayMaxLen { - return fmt.Errorf("label was too long (%d), max allowed (%d)", length, cbg.ByteArrayMaxLen) - } - buf := make([]byte, length) - _, err = io.ReadAtLeast(br, buf, 
int(length)) - if err != nil { - return err - } - label.bs = buf - label.notString = maj != cbg.MajTextString - if !label.notString && !utf8.ValidString(string(buf)) { - return fmt.Errorf("label string not valid utf8") - } - - return nil -} - -func (label DealLabel) MarshalJSON() ([]byte, error) { - if !label.IsString() { - return json.Marshal("") - } - - str, err := label.ToString() - if err != nil { - return nil, xerrors.Errorf("failed to convert to string: %w", err) - } - - return json.Marshal(str) -} - -func (label *DealLabel) UnmarshalJSON(b []byte) error { - var str string - if err := json.Unmarshal(b, &str); err != nil { - return xerrors.Errorf("failed to unmarshal string: %w", err) - } - - newLabel, err := NewLabelFromString(str) - if err != nil { - return xerrors.Errorf("failed to create label from string: %w", err) - } - - *label = newLabel - return nil -} - -// Note: Deal Collateral is only released and returned to clients and miners -// when the storage deal stops counting towards power. In the current iteration, -// it will be released when the sector containing the storage deals expires, -// even though some storage deals can expire earlier than the sector does. -// Collaterals are denominated in PerEpoch to incur a cost for self dealing or -// minimal deals that last for a long time. -// Note: ClientCollateralPerEpoch may not be needed and removed pending future confirmation. -// There will be a Minimum value for both client and provider deal collateral. -type DealProposal struct { - PieceCID cid.Cid `checked:"true"` // Checked in validateDeal, CommP - PieceSize abi.PaddedPieceSize - VerifiedDeal bool - Client addr.Address - Provider addr.Address - - // Label is an arbitrary client chosen label to apply to the deal - Label DealLabel - - // Nominal start epoch. Deal payment is linear between StartEpoch and EndEpoch, - // with total amount StoragePricePerEpoch * (EndEpoch - StartEpoch). - // Storage deal must appear in a sealed (proven) sector no later than StartEpoch, - // otherwise it is invalid. 
- StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch - StoragePricePerEpoch abi.TokenAmount - - ProviderCollateral abi.TokenAmount - ClientCollateral abi.TokenAmount -} - -// ClientDealProposal is a DealProposal signed by a client -type ClientDealProposal struct { - Proposal DealProposal - ClientSignature acrypto.Signature -} - -func (p *DealProposal) Duration() abi.ChainEpoch { - return p.EndEpoch - p.StartEpoch -} - -func (p *DealProposal) TotalStorageFee() abi.TokenAmount { - return big.Mul(p.StoragePricePerEpoch, big.NewInt(int64(p.Duration()))) -} - -func (p *DealProposal) ClientBalanceRequirement() abi.TokenAmount { - return big.Add(p.ClientCollateral, p.TotalStorageFee()) -} - -func (p *DealProposal) ProviderBalanceRequirement() abi.TokenAmount { - return p.ProviderCollateral -} - -func (p *DealProposal) Cid() (cid.Cid, error) { - buf := new(bytes.Buffer) - if err := p.MarshalCBOR(buf); err != nil { - return cid.Undef, err - } - return abi.CidBuilder.Sum(buf.Bytes()) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/market_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/market_state.go deleted file mode 100644 index 3bbe341..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/market_state.go +++ /dev/null @@ -1,206 +0,0 @@ -package market - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - "github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" - xerrors "golang.org/x/xerrors" -) - -const EpochUndefined = abi.ChainEpoch(-1) - -// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. -const ProposalsAmtBitwidth = 5 -const StatesAmtBitwidth = 6 - -type State struct { - // Proposals are deals that have been proposed and not yet cleaned up after expiry or termination. - Proposals cid.Cid // AMT[DealID]DealProposal - // States contains state for deals that have been activated and not yet cleaned up after expiry or termination. - // After expiration, the state exists until the proposal is cleaned up too. - // Invariant: keys(States) ⊆ keys(Proposals). - States cid.Cid // AMT[DealID]DealState - - // PendingProposals tracks dealProposals that have not yet reached their deal start date. - // We track them here to ensure that miners can't publish the same deal proposal twice - PendingProposals cid.Cid // Set[DealCid] - - // Total amount held in escrow, indexed by actor address (including both locked and unlocked amounts). - EscrowTable cid.Cid // BalanceTable - - // Amount locked, indexed by actor address. - // Note: the amounts in this table do not affect the overall amount in escrow: - // only the _portion_ of the total escrow amount that is locked. - LockedTable cid.Cid // BalanceTable - - NextID abi.DealID - - // Metadata cached for efficient iteration over deals. 
- DealOpsByEpoch cid.Cid // SetMultimap, HAMT[epoch]Set - LastCron abi.ChainEpoch - - // Total Client Collateral that is locked -> unlocked when deal is terminated - TotalClientLockedCollateral abi.TokenAmount - // Total Provider Collateral that is locked -> unlocked when deal is terminated - TotalProviderLockedCollateral abi.TokenAmount - // Total storage fee that is locked in escrow -> unlocked when payments are made - TotalClientStorageFee abi.TokenAmount -} - -func ConstructState(store adt.Store) (*State, error) { - emptyProposalsArrayCid, err := adt.StoreEmptyArray(store, ProposalsAmtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty array: %w", err) - } - emptyStatesArrayCid, err := adt.StoreEmptyArray(store, StatesAmtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty states array: %w", err) - } - - emptyPendingProposalsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty map: %w", err) - } - emptyDealOpsHamtCid, err := StoreEmptySetMultimap(store, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty multiset: %w", err) - } - emptyBalanceTableCid, err := adt.StoreEmptyMap(store, adt.BalanceTableBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty balance table: %w", err) - } - - return &State{ - Proposals: emptyProposalsArrayCid, - States: emptyStatesArrayCid, - PendingProposals: emptyPendingProposalsMapCid, - EscrowTable: emptyBalanceTableCid, - LockedTable: emptyBalanceTableCid, - NextID: abi.DealID(0), - DealOpsByEpoch: emptyDealOpsHamtCid, - LastCron: abi.ChainEpoch(-1), - - TotalClientLockedCollateral: abi.NewTokenAmount(0), - TotalProviderLockedCollateral: abi.NewTokenAmount(0), - TotalClientStorageFee: abi.NewTokenAmount(0), - }, nil -} - -// A specialization of a array to deals. -// It is an error to query for a key that doesn't exist. -type DealArray struct { - *adt.Array -} - -// Interprets a store as balance table with root `r`. -func AsDealProposalArray(s adt.Store, r cid.Cid) (*DealArray, error) { - a, err := adt.AsArray(s, r, ProposalsAmtBitwidth) - if err != nil { - return nil, err - } - return &DealArray{a}, nil -} - -func (d *DealArray) GetDealProposal(dealID abi.DealID) (*DealProposal, error) { - proposal, found, err := d.Get(dealID) - if err != nil { - return nil, xerrors.Errorf("failed to load proposal: %w", err) - } - if !found { - return nil, exitcode.ErrNotFound.Wrapf("no such deal %d", dealID) - } - - return proposal, nil -} - -// Returns the root cid of underlying AMT. -func (t *DealArray) Root() (cid.Cid, error) { - return t.Array.Root() -} - -// Gets the deal for a key. The entry must have been previously initialized. -func (t *DealArray) Get(id abi.DealID) (*DealProposal, bool, error) { - var value DealProposal - found, err := t.Array.Get(uint64(id), &value) - return &value, found, err -} - -func (t *DealArray) Set(k abi.DealID, value *DealProposal) error { - return t.Array.Set(uint64(k), value) -} - -func (t *DealArray) Delete(id abi.DealID) error { - return t.Array.Delete(uint64(id)) -} - -// Validates a collection of deal dealProposals for activation, and returns their combined weight, -// split into regular deal weight and verified deal weight. 
-func ValidateDealsForActivation( - st *State, store adt.Store, dealIDs []abi.DealID, minerAddr addr.Address, sectorExpiry, currEpoch abi.ChainEpoch, -) (big.Int, big.Int, uint64, error) { - proposals, err := AsDealProposalArray(store, st.Proposals) - if err != nil { - return big.Int{}, big.Int{}, 0, xerrors.Errorf("failed to load dealProposals: %w", err) - } - - return validateAndComputeDealWeight(proposals, dealIDs, minerAddr, sectorExpiry, currEpoch) -} - -//////////////////////////////////////////////////////////////////////////////// -// Checks -//////////////////////////////////////////////////////////////////////////////// - -func validateAndComputeDealWeight(proposals *DealArray, dealIDs []abi.DealID, minerAddr addr.Address, - sectorExpiry abi.ChainEpoch, sectorActivation abi.ChainEpoch) (big.Int, big.Int, uint64, error) { - - seenDealIDs := make(map[abi.DealID]struct{}, len(dealIDs)) - totalDealSpace := uint64(0) - totalDealSpaceTime := big.Zero() - totalVerifiedSpaceTime := big.Zero() - for _, dealID := range dealIDs { - // Make sure we don't double-count deals. - if _, seen := seenDealIDs[dealID]; seen { - return big.Int{}, big.Int{}, 0, exitcode.ErrIllegalArgument.Wrapf("deal ID %d present multiple times", dealID) - } - seenDealIDs[dealID] = struct{}{} - - proposal, found, err := proposals.Get(dealID) - if err != nil { - return big.Int{}, big.Int{}, 0, xerrors.Errorf("failed to load deal %d: %w", dealID, err) - } - if !found { - return big.Int{}, big.Int{}, 0, exitcode.ErrNotFound.Wrapf("no such deal %d", dealID) - } - if err = validateDealCanActivate(proposal, minerAddr, sectorExpiry, sectorActivation); err != nil { - return big.Int{}, big.Int{}, 0, xerrors.Errorf("cannot activate deal %d: %w", dealID, err) - } - - // Compute deal weight - totalDealSpace += uint64(proposal.PieceSize) - dealSpaceTime := DealWeight(proposal) - if proposal.VerifiedDeal { - totalVerifiedSpaceTime = big.Add(totalVerifiedSpaceTime, dealSpaceTime) - } else { - totalDealSpaceTime = big.Add(totalDealSpaceTime, dealSpaceTime) - } - } - return totalDealSpaceTime, totalVerifiedSpaceTime, totalDealSpace, nil -} - -func validateDealCanActivate(proposal *DealProposal, minerAddr addr.Address, sectorExpiration, sectorActivation abi.ChainEpoch) error { - if proposal.Provider != minerAddr { - return exitcode.ErrForbidden.Wrapf("proposal has provider %v, must be %v", proposal.Provider, minerAddr) - } - if sectorActivation > proposal.StartEpoch { - return exitcode.ErrIllegalArgument.Wrapf("proposal start epoch %d has already elapsed at %d", proposal.StartEpoch, sectorActivation) - } - if proposal.EndEpoch > sectorExpiration { - return exitcode.ErrIllegalArgument.Wrapf("proposal expiration %d exceeds sector expiration %d", proposal.EndEpoch, sectorExpiration) - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/market_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/market_types.go deleted file mode 100644 index f69b33a..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/market_types.go +++ /dev/null @@ -1,67 +0,0 @@ -package market - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - cbg "github.com/whyrusleeping/cbor-gen" -) - -type WithdrawBalanceParams struct { - ProviderOrClientAddress addr.Address - Amount abi.TokenAmount -} - -type PublishStorageDealsParams struct { - Deals []ClientDealProposal -} - 
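validateAndComputeDealWeight above aggregates three quantities for a sector's deals: the total deal space, and the space*time weight split into unverified and verified totals, where each deal's weight is its PieceSize multiplied by its duration (EndEpoch - StartEpoch), as market.DealWeight computes. Below is a minimal sketch of that bookkeeping under stated assumptions: a stand-in proposal struct and Go's math/big in place of the go-state-types big.Int wrapper; the epoch figures in main assume mainnet's 2880 epochs per day.

// Sketch of the deal-weight aggregation performed by
// validateAndComputeDealWeight above; not the actor code itself.
package main

import (
	"fmt"
	"math/big"
)

type proposal struct {
	PieceSize    uint64 // padded piece size in bytes
	StartEpoch   int64
	EndEpoch     int64
	VerifiedDeal bool
}

// duration mirrors DealProposal.Duration: EndEpoch - StartEpoch.
func (p proposal) duration() int64 { return p.EndEpoch - p.StartEpoch }

// dealWeight mirrors market.DealWeight: space*time = PieceSize * Duration.
func dealWeight(p proposal) *big.Int {
	return new(big.Int).Mul(big.NewInt(p.duration()), new(big.Int).SetUint64(p.PieceSize))
}

// computeWeights mirrors the aggregation loop: total space plus the
// unverified / verified split of space*time.
func computeWeights(deals []proposal) (dealSpaceTime, verifiedSpaceTime *big.Int, dealSpace uint64) {
	dealSpaceTime, verifiedSpaceTime = big.NewInt(0), big.NewInt(0)
	for _, p := range deals {
		dealSpace += p.PieceSize
		w := dealWeight(p)
		if p.VerifiedDeal {
			verifiedSpaceTime.Add(verifiedSpaceTime, w)
		} else {
			dealSpaceTime.Add(dealSpaceTime, w)
		}
	}
	return dealSpaceTime, verifiedSpaceTime, dealSpace
}

func main() {
	deals := []proposal{
		{PieceSize: 32 << 30, StartEpoch: 1000, EndEpoch: 1000 + 180*2880, VerifiedDeal: false},
		{PieceSize: 16 << 30, StartEpoch: 1000, EndEpoch: 1000 + 360*2880, VerifiedDeal: true},
	}
	dst, vst, space := computeWeights(deals)
	fmt.Println("space:", space, "space*time:", dst, "verified space*time:", vst)
}

Keeping the verified and unverified totals separate mirrors the SectorWeights return type above, which reports DealWeight and VerifiedDealWeight as distinct fields.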
-type PublishStorageDealsReturn struct { - IDs []abi.DealID - ValidDeals bitfield.BitField -} - -// - Array of sectors rather than just one -// - Removed SectorStart (which is unknown at call time) -type VerifyDealsForActivationParams struct { - Sectors []SectorDeals -} - -type SectorDeals struct { - SectorExpiry abi.ChainEpoch - DealIDs []abi.DealID -} - -// - Array of sectors weights -type VerifyDealsForActivationReturn struct { - Sectors []SectorWeights -} - -type SectorWeights struct { - DealSpace uint64 // Total space in bytes of submitted deals. - DealWeight abi.DealWeight // Total space*time of submitted deals. - VerifiedDealWeight abi.DealWeight // Total space*time of submitted verified deals. -} - -type ActivateDealsParams struct { - DealIDs []abi.DealID - SectorExpiry abi.ChainEpoch -} - -type SectorDataSpec struct { - DealIDs []abi.DealID - SectorType abi.RegisteredSealProof -} - -type ComputeDataCommitmentParams struct { - Inputs []*SectorDataSpec -} - -type ComputeDataCommitmentReturn struct { - CommDs []cbg.CborCid -} - -type OnMinerSectorsTerminateParams struct { - Epoch abi.ChainEpoch - DealIDs []abi.DealID -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/methods.go deleted file mode 100644 index 8e5f9de..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/methods.go +++ /dev/null @@ -1,18 +0,0 @@ -package market - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" -) - -var Methods = []interface{}{ - 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor - 2: *new(func(interface{}, *address.Address) *abi.EmptyValue), // AddBalance - 3: *new(func(interface{}, *WithdrawBalanceParams) *abi.TokenAmount), // WithdrawBalance - 4: *new(func(interface{}, *PublishStorageDealsParams) *PublishStorageDealsReturn), // PublishStorageDeals - 5: *new(func(interface{}, *VerifyDealsForActivationParams) *VerifyDealsForActivationReturn), // VerifyDealsForActivation - 6: *new(func(interface{}, *ActivateDealsParams) *abi.EmptyValue), // ActivateDeals - 7: *new(func(interface{}, *OnMinerSectorsTerminateParams) *abi.EmptyValue), // OnMinerSectorsTerminate - 8: *new(func(interface{}, *ComputeDataCommitmentParams) *ComputeDataCommitmentReturn), // ComputeDataCommitment - 9: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // CronTick -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/policy.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/policy.go deleted file mode 100644 index f8cd654..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/policy.go +++ /dev/null @@ -1,61 +0,0 @@ -package market - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" -) - -// The percentage of normalized cirulating -// supply that must be covered by provider collateral in a deal -var ProviderCollateralSupplyTarget = builtin.BigFrac{ - Numerator: big.NewInt(1), // PARAM_SPEC - Denominator: big.NewInt(100), -} - -// Minimum deal duration. 
-var DealMinDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) // PARAM_SPEC - -// Maximum deal duration -var DealMaxDuration = abi.ChainEpoch(540 * builtin.EpochsInDay) // PARAM_SPEC - -// Bounds (inclusive) on deal duration -func DealDurationBounds(_ abi.PaddedPieceSize) (min abi.ChainEpoch, max abi.ChainEpoch) { - return DealMinDuration, DealMaxDuration -} - -// DealMaxLabelSize is the maximum size of a deal label. -const DealMaxLabelSize = 256 - -func DealPricePerEpochBounds(_ abi.PaddedPieceSize, _ abi.ChainEpoch) (min abi.TokenAmount, max abi.TokenAmount) { - return abi.NewTokenAmount(0), builtin.TotalFilecoin -} - -func DealProviderCollateralBounds(pieceSize abi.PaddedPieceSize, verified bool, networkRawPower, networkQAPower, baselinePower abi.StoragePower, - networkCirculatingSupply abi.TokenAmount) (min, max abi.TokenAmount) { - // minimumProviderCollateral = ProviderCollateralSupplyTarget * normalizedCirculatingSupply - // normalizedCirculatingSupply = networkCirculatingSupply * dealPowerShare - // dealPowerShare = dealRawPower / max(BaselinePower(t), NetworkRawPower(t), dealRawPower) - - lockTargetNum := big.Mul(ProviderCollateralSupplyTarget.Numerator, networkCirculatingSupply) - lockTargetDenom := ProviderCollateralSupplyTarget.Denominator - powerShareNum := big.NewIntUnsigned(uint64(pieceSize)) - powerShareDenom := big.Max(big.Max(networkRawPower, baselinePower), powerShareNum) - - num := big.Mul(lockTargetNum, powerShareNum) - denom := big.Mul(lockTargetDenom, powerShareDenom) - minCollateral := big.Div(num, denom) - return minCollateral, builtin.TotalFilecoin -} - -func DealClientCollateralBounds(_ abi.PaddedPieceSize, _ abi.ChainEpoch) (min abi.TokenAmount, max abi.TokenAmount) { - return abi.NewTokenAmount(0), builtin.TotalFilecoin -} - -// Computes the weight for a deal proposal, which is a function of its size and duration. -func DealWeight(proposal *DealProposal) abi.DealWeight { - dealDuration := big.NewInt(int64(proposal.Duration())) - dealSize := big.NewIntUnsigned(uint64(proposal.PieceSize)) - dealSpaceTime := big.Mul(dealDuration, dealSize) - return dealSpaceTime -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/set_multimap.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/set_multimap.go deleted file mode 100644 index cca7d9b..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/market/set_multimap.go +++ /dev/null @@ -1,37 +0,0 @@ -package market - -import ( - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - - cid "github.com/ipfs/go-cid" -) - -type SetMultimap struct { - mp *adt.Map - store adt.Store - innerBitwidth int -} - -// Creates a new map backed by an empty HAMT and flushes it to the store. -// Both inner and outer HAMTs have branching factor 2^bitwidth. -func MakeEmptySetMultimap(s adt.Store, bitwidth int) (*SetMultimap, error) { - m, err := adt.MakeEmptyMap(s, bitwidth) - if err != nil { - return nil, err - } - return &SetMultimap{mp: m, store: s, innerBitwidth: bitwidth}, nil -} - -// Writes a new empty map to the store and returns its CID. -func StoreEmptySetMultimap(s adt.Store, bitwidth int) (cid.Cid, error) { - mm, err := MakeEmptySetMultimap(s, bitwidth) - if err != nil { - return cid.Undef, err - } - return mm.Root() -} - -// Returns the root cid of the underlying HAMT. 
-func (mm *SetMultimap) Root() (cid.Cid, error) { - return mm.mp.Root() -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/cbor_gen.go deleted file mode 100644 index 1c453b1..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/cbor_gen.go +++ /dev/null @@ -1,5034 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package miner - -import ( - "fmt" - "io" - - address "github.com/filecoin-project/go-address" - abi "github.com/filecoin-project/go-state-types/abi" - proof "github.com/filecoin-project/go-state-types/proof" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufState = []byte{143} - -func (t *State) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Info (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Info); err != nil { - return xerrors.Errorf("failed to write cid field t.Info: %w", err) - } - - // t.PreCommitDeposits (big.Int) (struct) - if err := t.PreCommitDeposits.MarshalCBOR(w); err != nil { - return err - } - - // t.LockedFunds (big.Int) (struct) - if err := t.LockedFunds.MarshalCBOR(w); err != nil { - return err - } - - // t.VestingFunds (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.VestingFunds); err != nil { - return xerrors.Errorf("failed to write cid field t.VestingFunds: %w", err) - } - - // t.FeeDebt (big.Int) (struct) - if err := t.FeeDebt.MarshalCBOR(w); err != nil { - return err - } - - // t.InitialPledge (big.Int) (struct) - if err := t.InitialPledge.MarshalCBOR(w); err != nil { - return err - } - - // t.PreCommittedSectors (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PreCommittedSectors); err != nil { - return xerrors.Errorf("failed to write cid field t.PreCommittedSectors: %w", err) - } - - // t.PreCommittedSectorsCleanUp (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PreCommittedSectorsCleanUp); err != nil { - return xerrors.Errorf("failed to write cid field t.PreCommittedSectorsCleanUp: %w", err) - } - - // t.AllocatedSectors (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.AllocatedSectors); err != nil { - return xerrors.Errorf("failed to write cid field t.AllocatedSectors: %w", err) - } - - // t.Sectors (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Sectors); err != nil { - return xerrors.Errorf("failed to write cid field t.Sectors: %w", err) - } - - // t.ProvingPeriodStart (abi.ChainEpoch) (int64) - if t.ProvingPeriodStart >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ProvingPeriodStart)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ProvingPeriodStart-1)); err != nil { - return err - } - } - - // t.CurrentDeadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrentDeadline)); err != nil { - return err - } - - // t.Deadlines (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Deadlines); err != nil { - return xerrors.Errorf("failed to write cid field t.Deadlines: %w", err) - } - - // t.EarlyTerminations (bitfield.BitField) (struct) - if err := 
t.EarlyTerminations.MarshalCBOR(w); err != nil { - return err - } - - // t.DeadlineCronActive (bool) (bool) - if err := cbg.WriteBool(w, t.DeadlineCronActive); err != nil { - return err - } - return nil -} - -func (t *State) UnmarshalCBOR(r io.Reader) error { - *t = State{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 15 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Info (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Info: %w", err) - } - - t.Info = c - - } - // t.PreCommitDeposits (big.Int) (struct) - - { - - if err := t.PreCommitDeposits.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PreCommitDeposits: %w", err) - } - - } - // t.LockedFunds (big.Int) (struct) - - { - - if err := t.LockedFunds.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.LockedFunds: %w", err) - } - - } - // t.VestingFunds (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.VestingFunds: %w", err) - } - - t.VestingFunds = c - - } - // t.FeeDebt (big.Int) (struct) - - { - - if err := t.FeeDebt.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FeeDebt: %w", err) - } - - } - // t.InitialPledge (big.Int) (struct) - - { - - if err := t.InitialPledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.InitialPledge: %w", err) - } - - } - // t.PreCommittedSectors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PreCommittedSectors: %w", err) - } - - t.PreCommittedSectors = c - - } - // t.PreCommittedSectorsCleanUp (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PreCommittedSectorsCleanUp: %w", err) - } - - t.PreCommittedSectorsCleanUp = c - - } - // t.AllocatedSectors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.AllocatedSectors: %w", err) - } - - t.AllocatedSectors = c - - } - // t.Sectors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Sectors: %w", err) - } - - t.Sectors = c - - } - // t.ProvingPeriodStart (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ProvingPeriodStart = abi.ChainEpoch(extraI) - } - // t.CurrentDeadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.CurrentDeadline = uint64(extra) - - } - // t.Deadlines (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field 
t.Deadlines: %w", err) - } - - t.Deadlines = c - - } - // t.EarlyTerminations (bitfield.BitField) (struct) - - { - - if err := t.EarlyTerminations.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.EarlyTerminations: %w", err) - } - - } - // t.DeadlineCronActive (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.DeadlineCronActive = false - case 21: - t.DeadlineCronActive = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -var lengthBufMinerInfo = []byte{139} - -func (t *MinerInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufMinerInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.ControlAddresses ([]address.Address) (slice) - if len(t.ControlAddresses) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ControlAddresses was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddresses))); err != nil { - return err - } - for _, v := range t.ControlAddresses { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.PendingWorkerKey (miner.WorkerKeyChange) (struct) - if err := t.PendingWorkerKey.MarshalCBOR(w); err != nil { - return err - } - - // t.PeerId ([]uint8) (slice) - if len(t.PeerId) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.PeerId was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.PeerId))); err != nil { - return err - } - - if _, err := w.Write(t.PeerId[:]); err != nil { - return err - } - - // t.Multiaddrs ([][]uint8) (slice) - if len(t.Multiaddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Multiaddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { - return err - } - for _, v := range t.Multiaddrs { - if len(v) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { - return err - } - - if _, err := w.Write(v[:]); err != nil { - return err - } - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - if t.WindowPoStProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil { - return err - } - } - - // t.SectorSize (abi.SectorSize) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorSize)); err != nil { - return err - } - - // t.WindowPoStPartitionSectors (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStPartitionSectors)); err != nil { - return err - } - - // t.ConsensusFaultElapsed (abi.ChainEpoch) (int64) - if t.ConsensusFaultElapsed >= 0 { - if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ConsensusFaultElapsed)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ConsensusFaultElapsed-1)); err != nil { - return err - } - } - - // t.PendingOwnerAddress (address.Address) (struct) - if err := t.PendingOwnerAddress.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *MinerInfo) UnmarshalCBOR(r io.Reader) error { - *t = MinerInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 11 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Owner: %w", err) - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Worker: %w", err) - } - - } - // t.ControlAddresses ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ControlAddresses: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ControlAddresses = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ControlAddresses[i] = v - } - - // t.PendingWorkerKey (miner.WorkerKeyChange) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.PendingWorkerKey = new(WorkerKeyChange) - if err := t.PendingWorkerKey.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PendingWorkerKey pointer: %w", err) - } - } - - } - // t.PeerId ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.PeerId: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.PeerId = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.PeerId[:]); err != nil { - return err - } - // t.Multiaddrs ([][]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Multiaddrs = make([][]uint8, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Multiaddrs[i] = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { - return err - } - } - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) 
- { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.WindowPoStProofType = abi.RegisteredPoStProof(extraI) - } - // t.SectorSize (abi.SectorSize) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorSize = abi.SectorSize(extra) - - } - // t.WindowPoStPartitionSectors (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.WindowPoStPartitionSectors = uint64(extra) - - } - // t.ConsensusFaultElapsed (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ConsensusFaultElapsed = abi.ChainEpoch(extraI) - } - // t.PendingOwnerAddress (address.Address) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.PendingOwnerAddress = new(address.Address) - if err := t.PendingOwnerAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PendingOwnerAddress pointer: %w", err) - } - } - - } - return nil -} - -var lengthBufDeadlines = []byte{129} - -func (t *Deadlines) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeadlines); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Due ([48]cid.Cid) (array) - if len(t.Due) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Due was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Due))); err != nil { - return err - } - for _, v := range t.Due { - if err := cbg.WriteCidBuf(scratch, w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Due: %w", err) - } - } - return nil -} - -func (t *Deadlines) UnmarshalCBOR(r io.Reader) error { - *t = Deadlines{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Due ([48]cid.Cid) (array) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Due: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra != 48 { - return fmt.Errorf("expected 
array to have 48 elements") - } - - t.Due = [48]cid.Cid{} - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("reading cid field t.Due failed: %w", err) - } - t.Due[i] = c - } - - return nil -} - -var lengthBufDeadline = []byte{139} - -func (t *Deadline) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeadline); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Partitions (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Partitions); err != nil { - return xerrors.Errorf("failed to write cid field t.Partitions: %w", err) - } - - // t.ExpirationsEpochs (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.ExpirationsEpochs); err != nil { - return xerrors.Errorf("failed to write cid field t.ExpirationsEpochs: %w", err) - } - - // t.PartitionsPoSted (bitfield.BitField) (struct) - if err := t.PartitionsPoSted.MarshalCBOR(w); err != nil { - return err - } - - // t.EarlyTerminations (bitfield.BitField) (struct) - if err := t.EarlyTerminations.MarshalCBOR(w); err != nil { - return err - } - - // t.LiveSectors (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LiveSectors)); err != nil { - return err - } - - // t.TotalSectors (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSectors)); err != nil { - return err - } - - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { - return err - } - - // t.OptimisticPoStSubmissions (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.OptimisticPoStSubmissions); err != nil { - return xerrors.Errorf("failed to write cid field t.OptimisticPoStSubmissions: %w", err) - } - - // t.SectorsSnapshot (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SectorsSnapshot); err != nil { - return xerrors.Errorf("failed to write cid field t.SectorsSnapshot: %w", err) - } - - // t.PartitionsSnapshot (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PartitionsSnapshot); err != nil { - return xerrors.Errorf("failed to write cid field t.PartitionsSnapshot: %w", err) - } - - // t.OptimisticPoStSubmissionsSnapshot (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.OptimisticPoStSubmissionsSnapshot); err != nil { - return xerrors.Errorf("failed to write cid field t.OptimisticPoStSubmissionsSnapshot: %w", err) - } - - return nil -} - -func (t *Deadline) UnmarshalCBOR(r io.Reader) error { - *t = Deadline{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 11 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Partitions (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Partitions: %w", err) - } - - t.Partitions = c - - } - // t.ExpirationsEpochs (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ExpirationsEpochs: %w", err) - } - - t.ExpirationsEpochs = c - - } - // t.PartitionsPoSted (bitfield.BitField) (struct) - - { - - if err := t.PartitionsPoSted.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PartitionsPoSted: 
%w", err) - } - - } - // t.EarlyTerminations (bitfield.BitField) (struct) - - { - - if err := t.EarlyTerminations.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.EarlyTerminations: %w", err) - } - - } - // t.LiveSectors (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.LiveSectors = uint64(extra) - - } - // t.TotalSectors (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TotalSectors = uint64(extra) - - } - // t.FaultyPower (miner.PowerPair) (struct) - - { - - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) - } - - } - // t.OptimisticPoStSubmissions (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.OptimisticPoStSubmissions: %w", err) - } - - t.OptimisticPoStSubmissions = c - - } - // t.SectorsSnapshot (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SectorsSnapshot: %w", err) - } - - t.SectorsSnapshot = c - - } - // t.PartitionsSnapshot (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PartitionsSnapshot: %w", err) - } - - t.PartitionsSnapshot = c - - } - // t.OptimisticPoStSubmissionsSnapshot (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.OptimisticPoStSubmissionsSnapshot: %w", err) - } - - t.OptimisticPoStSubmissionsSnapshot = c - - } - return nil -} - -var lengthBufPartition = []byte{139} - -func (t *Partition) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPartition); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - - // t.Unproven (bitfield.BitField) (struct) - if err := t.Unproven.MarshalCBOR(w); err != nil { - return err - } - - // t.Faults (bitfield.BitField) (struct) - if err := t.Faults.MarshalCBOR(w); err != nil { - return err - } - - // t.Recoveries (bitfield.BitField) (struct) - if err := t.Recoveries.MarshalCBOR(w); err != nil { - return err - } - - // t.Terminated (bitfield.BitField) (struct) - if err := t.Terminated.MarshalCBOR(w); err != nil { - return err - } - - // t.ExpirationsEpochs (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.ExpirationsEpochs); err != nil { - return xerrors.Errorf("failed to write cid field t.ExpirationsEpochs: %w", err) - } - - // t.EarlyTerminated (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.EarlyTerminated); err != nil { - return xerrors.Errorf("failed to write cid field t.EarlyTerminated: %w", err) - } - - // t.LivePower (miner.PowerPair) (struct) - if err := t.LivePower.MarshalCBOR(w); err != nil { - return err - } - - // t.UnprovenPower (miner.PowerPair) (struct) - if err := t.UnprovenPower.MarshalCBOR(w); err != nil { - return err - } - - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { - return err - } - - // t.RecoveringPower (miner.PowerPair) 
(struct) - if err := t.RecoveringPower.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Partition) UnmarshalCBOR(r io.Reader) error { - *t = Partition{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 11 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - // t.Unproven (bitfield.BitField) (struct) - - { - - if err := t.Unproven.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Unproven: %w", err) - } - - } - // t.Faults (bitfield.BitField) (struct) - - { - - if err := t.Faults.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Faults: %w", err) - } - - } - // t.Recoveries (bitfield.BitField) (struct) - - { - - if err := t.Recoveries.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Recoveries: %w", err) - } - - } - // t.Terminated (bitfield.BitField) (struct) - - { - - if err := t.Terminated.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Terminated: %w", err) - } - - } - // t.ExpirationsEpochs (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ExpirationsEpochs: %w", err) - } - - t.ExpirationsEpochs = c - - } - // t.EarlyTerminated (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.EarlyTerminated: %w", err) - } - - t.EarlyTerminated = c - - } - // t.LivePower (miner.PowerPair) (struct) - - { - - if err := t.LivePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.LivePower: %w", err) - } - - } - // t.UnprovenPower (miner.PowerPair) (struct) - - { - - if err := t.UnprovenPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.UnprovenPower: %w", err) - } - - } - // t.FaultyPower (miner.PowerPair) (struct) - - { - - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) - } - - } - // t.RecoveringPower (miner.PowerPair) (struct) - - { - - if err := t.RecoveringPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RecoveringPower: %w", err) - } - - } - return nil -} - -var lengthBufExpirationSet = []byte{133} - -func (t *ExpirationSet) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufExpirationSet); err != nil { - return err - } - - // t.OnTimeSectors (bitfield.BitField) (struct) - if err := t.OnTimeSectors.MarshalCBOR(w); err != nil { - return err - } - - // t.EarlySectors (bitfield.BitField) (struct) - if err := t.EarlySectors.MarshalCBOR(w); err != nil { - return err - } - - // t.OnTimePledge (big.Int) (struct) - if err := t.OnTimePledge.MarshalCBOR(w); err != nil { - return err - } - - // t.ActivePower (miner.PowerPair) (struct) - if err := t.ActivePower.MarshalCBOR(w); err != nil { - return err - } - - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *ExpirationSet) UnmarshalCBOR(r io.Reader) error { - *t = ExpirationSet{} - - br 
:= cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.OnTimeSectors (bitfield.BitField) (struct) - - { - - if err := t.OnTimeSectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.OnTimeSectors: %w", err) - } - - } - // t.EarlySectors (bitfield.BitField) (struct) - - { - - if err := t.EarlySectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.EarlySectors: %w", err) - } - - } - // t.OnTimePledge (big.Int) (struct) - - { - - if err := t.OnTimePledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.OnTimePledge: %w", err) - } - - } - // t.ActivePower (miner.PowerPair) (struct) - - { - - if err := t.ActivePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ActivePower: %w", err) - } - - } - // t.FaultyPower (miner.PowerPair) (struct) - - { - - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) - } - - } - return nil -} - -var lengthBufPowerPair = []byte{130} - -func (t *PowerPair) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPowerPair); err != nil { - return err - } - - // t.Raw (big.Int) (struct) - if err := t.Raw.MarshalCBOR(w); err != nil { - return err - } - - // t.QA (big.Int) (struct) - if err := t.QA.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *PowerPair) UnmarshalCBOR(r io.Reader) error { - *t = PowerPair{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Raw (big.Int) (struct) - - { - - if err := t.Raw.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Raw: %w", err) - } - - } - // t.QA (big.Int) (struct) - - { - - if err := t.QA.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QA: %w", err) - } - - } - return nil -} - -var lengthBufSectorPreCommitOnChainInfo = []byte{133} - -func (t *SectorPreCommitOnChainInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorPreCommitOnChainInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Info (miner.SectorPreCommitInfo) (struct) - if err := t.Info.MarshalCBOR(w); err != nil { - return err - } - - // t.PreCommitDeposit (big.Int) (struct) - if err := t.PreCommitDeposit.MarshalCBOR(w); err != nil { - return err - } - - // t.PreCommitEpoch (abi.ChainEpoch) (int64) - if t.PreCommitEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PreCommitEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.PreCommitEpoch-1)); err != nil { - return err - } - } - - // t.DealWeight (big.Int) (struct) - if err := t.DealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.VerifiedDealWeight (big.Int) (struct) - if err := t.VerifiedDealWeight.MarshalCBOR(w); err != 
nil { - return err - } - return nil -} - -func (t *SectorPreCommitOnChainInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorPreCommitOnChainInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Info (miner.SectorPreCommitInfo) (struct) - - { - - if err := t.Info.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Info: %w", err) - } - - } - // t.PreCommitDeposit (big.Int) (struct) - - { - - if err := t.PreCommitDeposit.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PreCommitDeposit: %w", err) - } - - } - // t.PreCommitEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.PreCommitEpoch = abi.ChainEpoch(extraI) - } - // t.DealWeight (big.Int) (struct) - - { - - if err := t.DealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) - } - - } - // t.VerifiedDealWeight (big.Int) (struct) - - { - - if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) - } - - } - return nil -} - -var lengthBufSectorPreCommitInfo = []byte{138} - -func (t *SectorPreCommitInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorPreCommitInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - // t.SealRandEpoch (abi.ChainEpoch) (int64) - if t.SealRandEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealRandEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealRandEpoch-1)); err != nil { - return err - } - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } 
- - // t.Expiration (abi.ChainEpoch) (int64) - if t.Expiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { - return err - } - } - - // t.ReplaceCapacity (bool) (bool) - if err := cbg.WriteBool(w, t.ReplaceCapacity); err != nil { - return err - } - - // t.ReplaceSectorDeadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorDeadline)); err != nil { - return err - } - - // t.ReplaceSectorPartition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorPartition)); err != nil { - return err - } - - // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorNumber)); err != nil { - return err - } - - return nil -} - -func (t *SectorPreCommitInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorPreCommitInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 10 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - // t.SealRandEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealRandEpoch = abi.ChainEpoch(extraI) - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - 
return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.Expiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Expiration = abi.ChainEpoch(extraI) - } - // t.ReplaceCapacity (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.ReplaceCapacity = false - case 21: - t.ReplaceCapacity = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.ReplaceSectorDeadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorDeadline = uint64(extra) - - } - // t.ReplaceSectorPartition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorPartition = uint64(extra) - - } - // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorNumber = abi.SectorNumber(extra) - - } - return nil -} - -var lengthBufSectorOnChainInfo = []byte{142} - -func (t *SectorOnChainInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorOnChainInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.Activation (abi.ChainEpoch) (int64) - if 
t.Activation >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Activation)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Activation-1)); err != nil { - return err - } - } - - // t.Expiration (abi.ChainEpoch) (int64) - if t.Expiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { - return err - } - } - - // t.DealWeight (big.Int) (struct) - if err := t.DealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.VerifiedDealWeight (big.Int) (struct) - if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.InitialPledge (big.Int) (struct) - if err := t.InitialPledge.MarshalCBOR(w); err != nil { - return err - } - - // t.ExpectedDayReward (big.Int) (struct) - if err := t.ExpectedDayReward.MarshalCBOR(w); err != nil { - return err - } - - // t.ExpectedStoragePledge (big.Int) (struct) - if err := t.ExpectedStoragePledge.MarshalCBOR(w); err != nil { - return err - } - - // t.ReplacedSectorAge (abi.ChainEpoch) (int64) - if t.ReplacedSectorAge >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplacedSectorAge)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ReplacedSectorAge-1)); err != nil { - return err - } - } - - // t.ReplacedDayReward (big.Int) (struct) - if err := t.ReplacedDayReward.MarshalCBOR(w); err != nil { - return err - } - - // t.SectorKeyCID (cid.Cid) (struct) - - if t.SectorKeyCID == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.SectorKeyCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SectorKeyCID: %w", err) - } - } - - return nil -} - -func (t *SectorOnChainInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorOnChainInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 14 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - // t.DealIDs ([]abi.DealID) (slice) - 
- maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.Activation (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Activation = abi.ChainEpoch(extraI) - } - // t.Expiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Expiration = abi.ChainEpoch(extraI) - } - // t.DealWeight (big.Int) (struct) - - { - - if err := t.DealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) - } - - } - // t.VerifiedDealWeight (big.Int) (struct) - - { - - if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) - } - - } - // t.InitialPledge (big.Int) (struct) - - { - - if err := t.InitialPledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.InitialPledge: %w", err) - } - - } - // t.ExpectedDayReward (big.Int) (struct) - - { - - if err := t.ExpectedDayReward.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ExpectedDayReward: %w", err) - } - - } - // t.ExpectedStoragePledge (big.Int) (struct) - - { - - if err := t.ExpectedStoragePledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ExpectedStoragePledge: %w", err) - } - - } - // t.ReplacedSectorAge (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ReplacedSectorAge = abi.ChainEpoch(extraI) - } - // t.ReplacedDayReward (big.Int) (struct) - - { - - if err := t.ReplacedDayReward.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ReplacedDayReward: %w", err) - } - - } - // t.SectorKeyCID (cid.Cid) 
(struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SectorKeyCID: %w", err) - } - - t.SectorKeyCID = &c - } - - } - return nil -} - -var lengthBufWorkerKeyChange = []byte{130} - -func (t *WorkerKeyChange) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWorkerKeyChange); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewWorker (address.Address) (struct) - if err := t.NewWorker.MarshalCBOR(w); err != nil { - return err - } - - // t.EffectiveAt (abi.ChainEpoch) (int64) - if t.EffectiveAt >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EffectiveAt)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EffectiveAt-1)); err != nil { - return err - } - } - return nil -} - -func (t *WorkerKeyChange) UnmarshalCBOR(r io.Reader) error { - *t = WorkerKeyChange{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewWorker (address.Address) (struct) - - { - - if err := t.NewWorker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewWorker: %w", err) - } - - } - // t.EffectiveAt (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EffectiveAt = abi.ChainEpoch(extraI) - } - return nil -} - -var lengthBufVestingFunds = []byte{129} - -func (t *VestingFunds) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufVestingFunds); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Funds ([]miner.VestingFund) (slice) - if len(t.Funds) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Funds was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Funds))); err != nil { - return err - } - for _, v := range t.Funds { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *VestingFunds) UnmarshalCBOR(r io.Reader) error { - *t = VestingFunds{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Funds ([]miner.VestingFund) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Funds: array too large 
(%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Funds = make([]VestingFund, extra) - } - - for i := 0; i < int(extra); i++ { - - var v VestingFund - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Funds[i] = v - } - - return nil -} - -var lengthBufVestingFund = []byte{130} - -func (t *VestingFund) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufVestingFund); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Epoch (abi.ChainEpoch) (int64) - if t.Epoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { - return err - } - } - - // t.Amount (big.Int) (struct) - if err := t.Amount.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *VestingFund) UnmarshalCBOR(r io.Reader) error { - *t = VestingFund{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Epoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Epoch = abi.ChainEpoch(extraI) - } - // t.Amount (big.Int) (struct) - - { - - if err := t.Amount.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Amount: %w", err) - } - - } - return nil -} - -var lengthBufWindowedPoSt = []byte{130} - -func (t *WindowedPoSt) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWindowedPoSt); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Partitions (bitfield.BitField) (struct) - if err := t.Partitions.MarshalCBOR(w); err != nil { - return err - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *WindowedPoSt) UnmarshalCBOR(r io.Reader) error { - *t = WindowedPoSt{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Partitions (bitfield.BitField) (struct) - - { - - if err := t.Partitions.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Partitions: %w", err) - } - - } - // t.Proofs 
([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]proof.PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v proof.PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - return nil -} - -var lengthBufSubmitWindowedPoStParams = []byte{133} - -func (t *SubmitWindowedPoStParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSubmitWindowedPoStParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partitions ([]miner.PoStPartition) (slice) - if len(t.Partitions) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Partitions was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Partitions))); err != nil { - return err - } - for _, v := range t.Partitions { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.ChainCommitEpoch (abi.ChainEpoch) (int64) - if t.ChainCommitEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ChainCommitEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ChainCommitEpoch-1)); err != nil { - return err - } - } - - // t.ChainCommitRand (abi.Randomness) (slice) - if len(t.ChainCommitRand) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.ChainCommitRand was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ChainCommitRand))); err != nil { - return err - } - - if _, err := w.Write(t.ChainCommitRand[:]); err != nil { - return err - } - return nil -} - -func (t *SubmitWindowedPoStParams) UnmarshalCBOR(r io.Reader) error { - *t = SubmitWindowedPoStParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partitions ([]miner.PoStPartition) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Partitions: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - 
if extra > 0 { - t.Partitions = make([]PoStPartition, extra) - } - - for i := 0; i < int(extra); i++ { - - var v PoStPartition - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Partitions[i] = v - } - - // t.Proofs ([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]proof.PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v proof.PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - // t.ChainCommitEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ChainCommitEpoch = abi.ChainEpoch(extraI) - } - // t.ChainCommitRand (abi.Randomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.ChainCommitRand: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.ChainCommitRand = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.ChainCommitRand[:]); err != nil { - return err - } - return nil -} - -var lengthBufTerminateSectorsParams = []byte{129} - -func (t *TerminateSectorsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufTerminateSectorsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Terminations ([]miner.TerminationDeclaration) (slice) - if len(t.Terminations) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Terminations was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Terminations))); err != nil { - return err - } - for _, v := range t.Terminations { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *TerminateSectorsParams) UnmarshalCBOR(r io.Reader) error { - *t = TerminateSectorsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Terminations ([]miner.TerminationDeclaration) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Terminations: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Terminations = make([]TerminationDeclaration, extra) - } - - for i := 0; i < int(extra); i++ { - - var v TerminationDeclaration - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Terminations[i] = v - 
} - - return nil -} - -var lengthBufTerminateSectorsReturn = []byte{129} - -func (t *TerminateSectorsReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufTerminateSectorsReturn); err != nil { - return err - } - - // t.Done (bool) (bool) - if err := cbg.WriteBool(w, t.Done); err != nil { - return err - } - return nil -} - -func (t *TerminateSectorsReturn) UnmarshalCBOR(r io.Reader) error { - *t = TerminateSectorsReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Done (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Done = false - case 21: - t.Done = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -var lengthBufChangePeerIDParams = []byte{129} - -func (t *ChangePeerIDParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChangePeerIDParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewID ([]uint8) (slice) - if len(t.NewID) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.NewID was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.NewID))); err != nil { - return err - } - - if _, err := w.Write(t.NewID[:]); err != nil { - return err - } - return nil -} - -func (t *ChangePeerIDParams) UnmarshalCBOR(r io.Reader) error { - *t = ChangePeerIDParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewID ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.NewID: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.NewID = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.NewID[:]); err != nil { - return err - } - return nil -} - -var lengthBufChangeMultiaddrsParams = []byte{129} - -func (t *ChangeMultiaddrsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChangeMultiaddrsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewMultiaddrs ([][]uint8) (slice) - if len(t.NewMultiaddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.NewMultiaddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.NewMultiaddrs))); err != nil { - return err - } - for _, v := range t.NewMultiaddrs { - if len(v) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajByteString, uint64(len(v))); err != nil { - return err - } - - if _, err := w.Write(v[:]); err != nil { - return err - } - } - return nil -} - -func (t *ChangeMultiaddrsParams) UnmarshalCBOR(r io.Reader) error { - *t = ChangeMultiaddrsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewMultiaddrs ([][]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.NewMultiaddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.NewMultiaddrs = make([][]uint8, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.NewMultiaddrs[i]: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.NewMultiaddrs[i] = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.NewMultiaddrs[i][:]); err != nil { - return err - } - } - } - - return nil -} - -var lengthBufProveCommitSectorParams = []byte{130} - -func (t *ProveCommitSectorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufProveCommitSectorParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Proof))); err != nil { - return err - } - - if _, err := w.Write(t.Proof[:]); err != nil { - return err - } - return nil -} - -func (t *ProveCommitSectorParams) UnmarshalCBOR(r io.Reader) error { - *t = ProveCommitSectorParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Proof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Proof[:]); err != nil { - return err - } - return nil -} - -var 
lengthBufProveCommitAggregateParams = []byte{130} - -func (t *ProveCommitAggregateParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufProveCommitAggregateParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumbers (bitfield.BitField) (struct) - if err := t.SectorNumbers.MarshalCBOR(w); err != nil { - return err - } - - // t.AggregateProof ([]uint8) (slice) - if len(t.AggregateProof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.AggregateProof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.AggregateProof))); err != nil { - return err - } - - if _, err := w.Write(t.AggregateProof[:]); err != nil { - return err - } - return nil -} - -func (t *ProveCommitAggregateParams) UnmarshalCBOR(r io.Reader) error { - *t = ProveCommitAggregateParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumbers (bitfield.BitField) (struct) - - { - - if err := t.SectorNumbers.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.SectorNumbers: %w", err) - } - - } - // t.AggregateProof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.AggregateProof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.AggregateProof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.AggregateProof[:]); err != nil { - return err - } - return nil -} - -var lengthBufChangeWorkerAddressParams = []byte{130} - -func (t *ChangeWorkerAddressParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChangeWorkerAddressParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewWorker (address.Address) (struct) - if err := t.NewWorker.MarshalCBOR(w); err != nil { - return err - } - - // t.NewControlAddrs ([]address.Address) (slice) - if len(t.NewControlAddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.NewControlAddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.NewControlAddrs))); err != nil { - return err - } - for _, v := range t.NewControlAddrs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ChangeWorkerAddressParams) UnmarshalCBOR(r io.Reader) error { - *t = ChangeWorkerAddressParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewWorker (address.Address) (struct) - - { - - if err := t.NewWorker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewWorker: %w", err) - } - - } - // t.NewControlAddrs ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if 
err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.NewControlAddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.NewControlAddrs = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.NewControlAddrs[i] = v - } - - return nil -} - -var lengthBufExtendSectorExpirationParams = []byte{129} - -func (t *ExtendSectorExpirationParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufExtendSectorExpirationParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Extensions ([]miner.ExpirationExtension) (slice) - if len(t.Extensions) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Extensions was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Extensions))); err != nil { - return err - } - for _, v := range t.Extensions { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ExtendSectorExpirationParams) UnmarshalCBOR(r io.Reader) error { - *t = ExtendSectorExpirationParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Extensions ([]miner.ExpirationExtension) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Extensions: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Extensions = make([]ExpirationExtension, extra) - } - - for i := 0; i < int(extra); i++ { - - var v ExpirationExtension - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Extensions[i] = v - } - - return nil -} - -var lengthBufDeclareFaultsParams = []byte{129} - -func (t *DeclareFaultsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeclareFaultsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Faults ([]miner.FaultDeclaration) (slice) - if len(t.Faults) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Faults was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Faults))); err != nil { - return err - } - for _, v := range t.Faults { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *DeclareFaultsParams) UnmarshalCBOR(r io.Reader) error { - *t = DeclareFaultsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Faults ([]miner.FaultDeclaration) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Faults: array too 
large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Faults = make([]FaultDeclaration, extra) - } - - for i := 0; i < int(extra); i++ { - - var v FaultDeclaration - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Faults[i] = v - } - - return nil -} - -var lengthBufDeclareFaultsRecoveredParams = []byte{129} - -func (t *DeclareFaultsRecoveredParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeclareFaultsRecoveredParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Recoveries ([]miner.RecoveryDeclaration) (slice) - if len(t.Recoveries) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Recoveries was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Recoveries))); err != nil { - return err - } - for _, v := range t.Recoveries { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *DeclareFaultsRecoveredParams) UnmarshalCBOR(r io.Reader) error { - *t = DeclareFaultsRecoveredParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Recoveries ([]miner.RecoveryDeclaration) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Recoveries: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Recoveries = make([]RecoveryDeclaration, extra) - } - - for i := 0; i < int(extra); i++ { - - var v RecoveryDeclaration - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Recoveries[i] = v - } - - return nil -} - -var lengthBufReportConsensusFaultParams = []byte{131} - -func (t *ReportConsensusFaultParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufReportConsensusFaultParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.BlockHeader1 ([]uint8) (slice) - if len(t.BlockHeader1) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.BlockHeader1 was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeader1))); err != nil { - return err - } - - if _, err := w.Write(t.BlockHeader1[:]); err != nil { - return err - } - - // t.BlockHeader2 ([]uint8) (slice) - if len(t.BlockHeader2) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.BlockHeader2 was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeader2))); err != nil { - return err - } - - if _, err := w.Write(t.BlockHeader2[:]); err != nil { - return err - } - - // t.BlockHeaderExtra ([]uint8) (slice) - if len(t.BlockHeaderExtra) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.BlockHeaderExtra was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeaderExtra))); err != nil { - return err - } - - if _, err := w.Write(t.BlockHeaderExtra[:]); err != nil { - 
return err - } - return nil -} - -func (t *ReportConsensusFaultParams) UnmarshalCBOR(r io.Reader) error { - *t = ReportConsensusFaultParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BlockHeader1 ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.BlockHeader1: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.BlockHeader1 = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.BlockHeader1[:]); err != nil { - return err - } - // t.BlockHeader2 ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.BlockHeader2: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.BlockHeader2 = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.BlockHeader2[:]); err != nil { - return err - } - // t.BlockHeaderExtra ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.BlockHeaderExtra: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.BlockHeaderExtra = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.BlockHeaderExtra[:]); err != nil { - return err - } - return nil -} - -var lengthBufGetControlAddressesReturn = []byte{131} - -func (t *GetControlAddressesReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufGetControlAddressesReturn); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.ControlAddrs ([]address.Address) (slice) - if len(t.ControlAddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ControlAddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddrs))); err != nil { - return err - } - for _, v := range t.ControlAddrs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *GetControlAddressesReturn) UnmarshalCBOR(r io.Reader) error { - *t = GetControlAddressesReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Owner: %w", err) - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != 
nil { - return xerrors.Errorf("unmarshaling t.Worker: %w", err) - } - - } - // t.ControlAddrs ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ControlAddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ControlAddrs = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ControlAddrs[i] = v - } - - return nil -} - -var lengthBufCheckSectorProvenParams = []byte{129} - -func (t *CheckSectorProvenParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCheckSectorProvenParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - return nil -} - -func (t *CheckSectorProvenParams) UnmarshalCBOR(r io.Reader) error { - *t = CheckSectorProvenParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - return nil -} - -var lengthBufWithdrawBalanceParams = []byte{129} - -func (t *WithdrawBalanceParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWithdrawBalanceParams); err != nil { - return err - } - - // t.AmountRequested (big.Int) (struct) - if err := t.AmountRequested.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *WithdrawBalanceParams) UnmarshalCBOR(r io.Reader) error { - *t = WithdrawBalanceParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.AmountRequested (big.Int) (struct) - - { - - if err := t.AmountRequested.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.AmountRequested: %w", err) - } - - } - return nil -} - -var lengthBufCompactPartitionsParams = []byte{130} - -func (t *CompactPartitionsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCompactPartitionsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partitions (bitfield.BitField) (struct) - if err := t.Partitions.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t 
*CompactPartitionsParams) UnmarshalCBOR(r io.Reader) error { - *t = CompactPartitionsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partitions (bitfield.BitField) (struct) - - { - - if err := t.Partitions.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Partitions: %w", err) - } - - } - return nil -} - -var lengthBufCompactSectorNumbersParams = []byte{129} - -func (t *CompactSectorNumbersParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCompactSectorNumbersParams); err != nil { - return err - } - - // t.MaskSectorNumbers (bitfield.BitField) (struct) - if err := t.MaskSectorNumbers.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CompactSectorNumbersParams) UnmarshalCBOR(r io.Reader) error { - *t = CompactSectorNumbersParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.MaskSectorNumbers (bitfield.BitField) (struct) - - { - - if err := t.MaskSectorNumbers.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.MaskSectorNumbers: %w", err) - } - - } - return nil -} - -var lengthBufCronEventPayload = []byte{129} - -func (t *CronEventPayload) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCronEventPayload); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.EventType (miner.CronEventType) (int64) - if t.EventType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EventType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EventType-1)); err != nil { - return err - } - } - return nil -} - -func (t *CronEventPayload) UnmarshalCBOR(r io.Reader) error { - *t = CronEventPayload{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.EventType (miner.CronEventType) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - 
t.EventType = CronEventType(extraI) - } - return nil -} - -var lengthBufDisputeWindowedPoStParams = []byte{130} - -func (t *DisputeWindowedPoStParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDisputeWindowedPoStParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.PoStIndex (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PoStIndex)); err != nil { - return err - } - - return nil -} - -func (t *DisputeWindowedPoStParams) UnmarshalCBOR(r io.Reader) error { - *t = DisputeWindowedPoStParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.PoStIndex (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PoStIndex = uint64(extra) - - } - return nil -} - -var lengthBufPreCommitSectorBatchParams = []byte{129} - -func (t *PreCommitSectorBatchParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPreCommitSectorBatchParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Sectors ([]miner.SectorPreCommitInfo) (slice) - if len(t.Sectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Sectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sectors))); err != nil { - return err - } - for _, v := range t.Sectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *PreCommitSectorBatchParams) UnmarshalCBOR(r io.Reader) error { - *t = PreCommitSectorBatchParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sectors ([]miner.SectorPreCommitInfo) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Sectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Sectors = make([]SectorPreCommitInfo, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorPreCommitInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Sectors[i] = v - } - - return nil -} - -var lengthBufProveReplicaUpdatesParams = []byte{129} - -func (t *ProveReplicaUpdatesParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := 
w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufProveReplicaUpdatesParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Updates ([]miner.ReplicaUpdate) (slice) - if len(t.Updates) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Updates was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Updates))); err != nil { - return err - } - for _, v := range t.Updates { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ProveReplicaUpdatesParams) UnmarshalCBOR(r io.Reader) error { - *t = ProveReplicaUpdatesParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Updates ([]miner.ReplicaUpdate) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Updates: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Updates = make([]ReplicaUpdate, extra) - } - - for i := 0; i < int(extra); i++ { - - var v ReplicaUpdate - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Updates[i] = v - } - - return nil -} - -var lengthBufFaultDeclaration = []byte{131} - -func (t *FaultDeclaration) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufFaultDeclaration); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *FaultDeclaration) UnmarshalCBOR(r io.Reader) error { - *t = FaultDeclaration{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - return nil -} - -var lengthBufRecoveryDeclaration = []byte{131} - -func (t *RecoveryDeclaration) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := 
w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufRecoveryDeclaration); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *RecoveryDeclaration) UnmarshalCBOR(r io.Reader) error { - *t = RecoveryDeclaration{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - return nil -} - -var lengthBufExpirationExtension = []byte{132} - -func (t *ExpirationExtension) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufExpirationExtension); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - - // t.NewExpiration (abi.ChainEpoch) (int64) - if t.NewExpiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NewExpiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.NewExpiration-1)); err != nil { - return err - } - } - return nil -} - -func (t *ExpirationExtension) UnmarshalCBOR(r io.Reader) error { - *t = ExpirationExtension{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - 
- maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - // t.NewExpiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.NewExpiration = abi.ChainEpoch(extraI) - } - return nil -} - -var lengthBufTerminationDeclaration = []byte{131} - -func (t *TerminationDeclaration) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufTerminationDeclaration); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *TerminationDeclaration) UnmarshalCBOR(r io.Reader) error { - *t = TerminationDeclaration{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - return nil -} - -var lengthBufPoStPartition = []byte{130} - -func (t *PoStPartition) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPoStPartition); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Index (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Index)); err != nil { - return err - } - - // t.Skipped (bitfield.BitField) (struct) - if err := t.Skipped.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *PoStPartition) UnmarshalCBOR(r io.Reader) error { - *t = PoStPartition{} - - br := cbg.GetPeeker(r) - scratch := 
make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Index (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Index = uint64(extra) - - } - // t.Skipped (bitfield.BitField) (struct) - - { - - if err := t.Skipped.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Skipped: %w", err) - } - - } - return nil -} - -var lengthBufReplicaUpdate = []byte{135} - -func (t *ReplicaUpdate) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufReplicaUpdate); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorID (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { - return err - } - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.NewSealedSectorCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.NewSealedSectorCID); err != nil { - return xerrors.Errorf("failed to write cid field t.NewSealedSectorCID: %w", err) - } - - // t.Deals ([]abi.DealID) (slice) - if len(t.Deals) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Deals was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Deals))); err != nil { - return err - } - for _, v := range t.Deals { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.UpdateProofType (abi.RegisteredUpdateProof) (int64) - if t.UpdateProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.UpdateProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.UpdateProofType-1)); err != nil { - return err - } - } - - // t.ReplicaProof ([]uint8) (slice) - if len(t.ReplicaProof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.ReplicaProof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ReplicaProof))); err != nil { - return err - } - - if _, err := w.Write(t.ReplicaProof[:]); err != nil { - return err - } - return nil -} - -func (t *ReplicaUpdate) UnmarshalCBOR(r io.Reader) error { - *t = ReplicaUpdate{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorID (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = abi.SectorNumber(extra) - - 
} - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.NewSealedSectorCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.NewSealedSectorCID: %w", err) - } - - t.NewSealedSectorCID = c - - } - // t.Deals ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Deals: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Deals = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.Deals slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.Deals was not a uint, instead got %d", maj) - } - - t.Deals[i] = abi.DealID(val) - } - - // t.UpdateProofType (abi.RegisteredUpdateProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.UpdateProofType = abi.RegisteredUpdateProof(extraI) - } - // t.ReplicaProof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.ReplicaProof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.ReplicaProof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.ReplicaProof[:]); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadline_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadline_state.go deleted file mode 100644 index 1315c8a..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadline_state.go +++ /dev/null @@ -1,192 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - xc "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-state-types/proof" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -// Deadlines contains Deadline objects, describing the sectors due at the given -// deadline and their state (faulty, terminated, recovering, etc.). -type Deadlines struct { - // Note: we could inline part of the deadline struct (e.g., active/assigned sectors) - // to make new sector assignment cheaper. 
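For context on the generated encoders being removed above: each MarshalCBOR/UnmarshalCBOR pair simply round-trips its struct through a plain io.Writer/io.Reader. A minimal, hypothetical external sketch using CheckSectorProvenParams (the sector number is an arbitrary placeholder, not taken from the patch):

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	miner "github.com/filecoin-project/go-state-types/builtin/v8/miner"
)

func main() {
	in := miner.CheckSectorProvenParams{SectorNumber: abi.SectorNumber(42)}

	// Encode: a one-element CBOR array header followed by the uint64 field.
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// Decode into a fresh value; the result matches the input.
	var out miner.CheckSectorProvenParams
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.SectorNumber == in.SectorNumber) // true
}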
At the moment, assigning a sector requires - // loading all deadlines to figure out where best to assign new sectors. - Due [WPoStPeriodDeadlines]cid.Cid // []Deadline -} - -// Deadline holds the state for all sectors due at a specific deadline. -type Deadline struct { - // Partitions in this deadline, in order. - // The keys of this AMT are always sequential integers beginning with zero. - Partitions cid.Cid // AMT[PartitionNumber]Partition - - // Maps epochs to partitions that _may_ have sectors that expire in or - // before that epoch, either on-time or early as faults. - // Keys are quantized to final epochs in each proving deadline. - // - // NOTE: Partitions MUST NOT be removed from this queue (until the - // associated epoch has passed) even if they no longer have sectors - // expiring at that epoch. Sectors expiring at this epoch may later be - // recovered, and this queue will not be updated at that time. - ExpirationsEpochs cid.Cid // AMT[ChainEpoch]BitField - - // Partitions that have been proved by window PoSts so far during the - // current challenge window. - // NOTE: This bitfield includes both partitions whose proofs - // were optimistically accepted and stored in - // OptimisticPoStSubmissions, and those whose proofs were - // verified on-chain. - PartitionsPoSted bitfield.BitField - - // Partitions with sectors that terminated early. - EarlyTerminations bitfield.BitField - - // The number of non-terminated sectors in this deadline (incl faulty). - LiveSectors uint64 - - // The total number of sectors in this deadline (incl dead). - TotalSectors uint64 - - // Memoized sum of faulty power in partitions. - FaultyPower PowerPair - - // AMT of optimistically accepted WindowPoSt proofs, submitted during - // the current challenge window. At the end of the challenge window, - // this AMT will be moved to OptimisticPoStSubmissionsSnapshot. WindowPoSt proofs - // verified on-chain do not appear in this AMT. - OptimisticPoStSubmissions cid.Cid // AMT[]WindowedPoSt - - // Snapshot of the miner's sectors AMT at the end of the previous challenge - // window for this deadline. - SectorsSnapshot cid.Cid - - // Snapshot of partition state at the end of the previous challenge - // window for this deadline. - PartitionsSnapshot cid.Cid - - // Snapshot of the proofs submitted by the end of the previous challenge - // window for this deadline. - // - // These proofs may be disputed via DisputeWindowedPoSt. Successfully - // disputed window PoSts are removed from the snapshot. - OptimisticPoStSubmissionsSnapshot cid.Cid -} - -type WindowedPoSt struct { - // Partitions proved by this WindowedPoSt. - Partitions bitfield.BitField - // Array of proofs, one per distinct registered proof type present in - // the sectors being proven. In the usual case of a single proof type, - // this array will always have a single element (independent of number - // of partitions). - Proofs []proof.PoStProof -} - -// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. -const DeadlinePartitionsAmtBitwidth = 3 // Usually a small array -const DeadlineExpirationAmtBitwidth = 5 - -// Given that 4 partitions can be proven in one post, this AMT's height will -// only exceed the partition AMT's height at ~0.75EiB of storage. 
-const DeadlineOptimisticPoStSubmissionsAmtBitwidth = 2 - -// -// Deadlines (plural) -// - -func (d *Deadlines) LoadDeadline(store adt.Store, dlIdx uint64) (*Deadline, error) { - if dlIdx >= uint64(len(d.Due)) { - return nil, xc.ErrIllegalArgument.Wrapf("invalid deadline %d", dlIdx) - } - deadline := new(Deadline) - err := store.Get(store.Context(), d.Due[dlIdx], deadline) - if err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to lookup deadline %d: %w", dlIdx, err) - } - return deadline, nil -} - -func (d *Deadlines) ForEach(store adt.Store, cb func(dlIdx uint64, dl *Deadline) error) error { - for dlIdx := range d.Due { - dl, err := d.LoadDeadline(store, uint64(dlIdx)) - if err != nil { - return err - } - err = cb(uint64(dlIdx), dl) - if err != nil { - return err - } - } - return nil -} - -func (d *Deadlines) UpdateDeadline(store adt.Store, dlIdx uint64, deadline *Deadline) error { - if dlIdx >= uint64(len(d.Due)) { - return xerrors.Errorf("invalid deadline %d", dlIdx) - } - - if err := deadline.ValidateState(); err != nil { - return err - } - - dlCid, err := store.Put(store.Context(), deadline) - if err != nil { - return err - } - d.Due[dlIdx] = dlCid - - return nil -} - -// -// Deadline (singular) -// - -func (d *Deadline) PartitionsArray(store adt.Store) (*adt.Array, error) { - arr, err := adt.AsArray(store, d.Partitions, DeadlinePartitionsAmtBitwidth) - if err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to load partitions: %w", err) - } - return arr, nil -} - -func (d *Deadline) OptimisticProofsSnapshotArray(store adt.Store) (*adt.Array, error) { - arr, err := adt.AsArray(store, d.OptimisticPoStSubmissionsSnapshot, DeadlineOptimisticPoStSubmissionsAmtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to load proofs snapshot: %w", err) - } - return arr, nil -} - -func (d *Deadline) LoadPartition(store adt.Store, partIdx uint64) (*Partition, error) { - partitions, err := d.PartitionsArray(store) - if err != nil { - return nil, err - } - var partition Partition - found, err := partitions.Get(partIdx, &partition) - if err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to lookup partition %d: %w", partIdx, err) - } - if !found { - return nil, xc.ErrNotFound.Wrapf("no partition %d", partIdx) - } - return &partition, nil -} - -func (d *Deadline) ValidateState() error { - if d.LiveSectors > d.TotalSectors { - return xerrors.Errorf("Deadline left with more live sectors than total: %v", d) - } - - if d.FaultyPower.Raw.LessThan(big.Zero()) || d.FaultyPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("Deadline left with negative faulty power: %v", d) - } - - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadlines.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadlines.go deleted file mode 100644 index df50fbd..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/deadlines.go +++ /dev/null @@ -1,58 +0,0 @@ -package miner - -import ( - "errors" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - "github.com/filecoin-project/go-state-types/dline" - "golang.org/x/xerrors" -) - -// Returns deadline-related calculations for a deadline in some proving period and the current epoch. 
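To show how the deadline accessors removed above fit together, here is a hedged in-package sketch that walks every deadline and partition; the adt.Store and the loaded Deadlines root are assumed to be supplied by the caller:

package miner // hypothetical sketch placed alongside deadline_state.go

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
)

// walkPartitions visits every (deadline, partition) pair using ForEach and
// PartitionsArray above. Sketch only; errors propagate unchanged.
func walkPartitions(store adt.Store, d *Deadlines) error {
	return d.ForEach(store, func(dlIdx uint64, dl *Deadline) error {
		parts, err := dl.PartitionsArray(store)
		if err != nil {
			return err
		}
		var part Partition
		return parts.ForEach(&part, func(i int64) error {
			fmt.Printf("deadline %d: partition %d (deadline totals: %d live / %d sectors)\n",
				dlIdx, i, dl.LiveSectors, dl.TotalSectors)
			return nil
		})
	})
}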
-func NewDeadlineInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch) *dline.Info { - return dline.NewInfo(periodStart, deadlineIdx, currEpoch, WPoStPeriodDeadlines, WPoStProvingPeriod, WPoStChallengeWindow, WPoStChallengeLookback, FaultDeclarationCutoff) -} - -func QuantSpecForDeadline(di *dline.Info) builtin.QuantSpec { - return builtin.NewQuantSpec(WPoStProvingPeriod, di.Last()) -} - -// FindSector returns the deadline and partition index for a sector number. -// It returns an error if the sector number is not tracked by deadlines. -func FindSector(store adt.Store, deadlines *Deadlines, sectorNum abi.SectorNumber) (uint64, uint64, error) { - for dlIdx := range deadlines.Due { - dl, err := deadlines.LoadDeadline(store, uint64(dlIdx)) - if err != nil { - return 0, 0, err - } - - partitions, err := adt.AsArray(store, dl.Partitions, DeadlinePartitionsAmtBitwidth) - if err != nil { - return 0, 0, err - } - var partition Partition - - partIdx := uint64(0) - stopErr := errors.New("stop") - err = partitions.ForEach(&partition, func(i int64) error { - found, err := partition.Sectors.IsSet(uint64(sectorNum)) - if err != nil { - return err - } - if found { - partIdx = uint64(i) - return stopErr - } - return nil - }) - if err == stopErr { - return uint64(dlIdx), partIdx, nil - } else if err != nil { - return 0, 0, err - } - - } - return 0, 0, xerrors.Errorf("sector %d not due at any deadline", sectorNum) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/methods.go deleted file mode 100644 index 37929d8..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/methods.go +++ /dev/null @@ -1,38 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v8/power" -) - -var Methods = []interface{}{ - 1: *new(func(interface{}, *power.MinerConstructorParams) *abi.EmptyValue), // Constructor - 2: *new(func(interface{}, *abi.EmptyValue) *GetControlAddressesReturn), // ControlAddresses - 3: *new(func(interface{}, *ChangeWorkerAddressParams) *abi.EmptyValue), // ChangeWorkerAddress - 4: *new(func(interface{}, *ChangePeerIDParams) *abi.EmptyValue), // ChangePeerID - 5: *new(func(interface{}, *SubmitWindowedPoStParams) *abi.EmptyValue), // SubmitWindowedPoSt - 6: *new(func(interface{}, *PreCommitSectorParams) *abi.EmptyValue), // PreCommitSector - 7: *new(func(interface{}, *ProveCommitSectorParams) *abi.EmptyValue), // ProveCommitSector - 8: *new(func(interface{}, *ExtendSectorExpirationParams) *abi.EmptyValue), // ExtendSectorExpiration - 9: *new(func(interface{}, *TerminateSectorsParams) *TerminateSectorsReturn), // TerminateSectors - 10: *new(func(interface{}, *DeclareFaultsParams) *abi.EmptyValue), // DeclareFaults - 11: *new(func(interface{}, *DeclareFaultsRecoveredParams) *abi.EmptyValue), // DeclareFaultsRecovered - 12: *new(func(interface{}, *DeferredCronEventParams) *abi.EmptyValue), // OnDeferredCronEvent - 13: *new(func(interface{}, *CheckSectorProvenParams) *abi.EmptyValue), // CheckSectorProven - 14: *new(func(interface{}, *ApplyRewardParams) *abi.EmptyValue), // ApplyRewards - 15: *new(func(interface{}, *ReportConsensusFaultParams) *abi.EmptyValue), // ReportConsensusFault - 16: *new(func(interface{}, *WithdrawBalanceParams) *abi.TokenAmount), // WithdrawBalance - 17: 
*new(func(interface{}, *ConfirmSectorProofsParams) *abi.EmptyValue), // ConfirmSectorProofsValid - 18: *new(func(interface{}, *ChangeMultiaddrsParams) *abi.EmptyValue), // ChangeMultiaddrs - 19: *new(func(interface{}, *CompactPartitionsParams) *abi.EmptyValue), // CompactPartitions - 20: *new(func(interface{}, *CompactSectorNumbersParams) *abi.EmptyValue), // CompactSectorNumbers - 21: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // ConfirmUpdateWorkerKey - 22: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // RepayDebt - 23: *new(func(interface{}, *address.Address) *abi.EmptyValue), // ChangeOwnerAddress - 24: *new(func(interface{}, *DisputeWindowedPoStParams) *abi.EmptyValue), // DisputeWindowedPoSt - 25: *new(func(interface{}, *PreCommitSectorBatchParams) *abi.EmptyValue), // PreCommitSectorBatch - 26: *new(func(interface{}, *ProveCommitAggregateParams) *abi.EmptyValue), // ProveCommitAggregate - 27: *new(func(interface{}, *ProveReplicaUpdatesParams) *bitfield.BitField), // ProveReplicaUpdates -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_state.go deleted file mode 100644 index ec8fe08..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_state.go +++ /dev/null @@ -1,301 +0,0 @@ -package miner - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - "github.com/filecoin-project/go-state-types/dline" - xc "github.com/filecoin-project/go-state-types/exitcode" - cid "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -// Balance of Miner Actor should be greater than or equal to -// the sum of PreCommitDeposits and LockedFunds. -// It is possible for balance to fall below the sum of -// PCD, LF and InitialPledgeRequirements, and this is a bad -// state (IP Debt) that limits a miner actor's behavior (i.e. no balance withdrawals) -// Excess balance as computed by st.GetAvailableBalance will be -// withdrawable or usable for pre-commit deposit or pledge lock-up. -type State struct { - // Information not related to sectors. - Info cid.Cid - - PreCommitDeposits abi.TokenAmount // Total funds locked as PreCommitDeposits - LockedFunds abi.TokenAmount // Total rewards and added funds locked in vesting table - - VestingFunds cid.Cid // VestingFunds (Vesting Funds schedule for the miner). - - FeeDebt abi.TokenAmount // Absolute value of debt this miner owes from unpaid fees - - InitialPledge abi.TokenAmount // Sum of initial pledge requirements of all active sectors - - // Sectors that have been pre-committed but not yet proven. - PreCommittedSectors cid.Cid // Map, HAMT[SectorNumber]SectorPreCommitOnChainInfo - - // PreCommittedSectorsCleanUp maintains the state required to cleanup expired PreCommittedSectors. - PreCommittedSectorsCleanUp cid.Cid // BitFieldQueue (AMT[Epoch]*BitField) - - // Allocated sector IDs. Sector IDs can never be reused once allocated. - AllocatedSectors cid.Cid // BitField - - // Information for all proven and not-yet-garbage-collected sectors. - // - // Sectors are removed from this AMT when the partition to which the - // sector belongs is compacted. 
- Sectors cid.Cid // Array, AMT[SectorNumber]SectorOnChainInfo (sparse) - - // DEPRECATED. This field will change names and no longer be updated every proving period in a future upgrade - // The first epoch in this miner's current proving period. This is the first epoch in which a PoSt for a - // partition at the miner's first deadline may arrive. Alternatively, it is after the last epoch at which - // a PoSt for the previous window is valid. - // Always greater than zero, this may be greater than the current epoch for genesis miners in the first - // WPoStProvingPeriod epochs of the chain; the epochs before the first proving period starts are exempt from Window - // PoSt requirements. - // Updated at the end of every period by a cron callback. - ProvingPeriodStart abi.ChainEpoch - - // DEPRECATED. This field will be removed from state in a future upgrade. - // Index of the deadline within the proving period beginning at ProvingPeriodStart that has not yet been - // finalized. - // Updated at the end of each deadline window by a cron callback. - CurrentDeadline uint64 - - // The sector numbers due for PoSt at each deadline in the current proving period, frozen at period start. - // New sectors are added and expired ones removed at proving period boundary. - // Faults are not subtracted from this in state, but on the fly. - Deadlines cid.Cid - - // Deadlines with outstanding fees for early sector termination. - EarlyTerminations bitfield.BitField - - // True when miner cron is active, false otherwise - DeadlineCronActive bool -} - -// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. -const PrecommitCleanUpAmtBitwidth = 6 -const SectorsAmtBitwidth = 5 - -type MinerInfo struct { - // Account that owns this miner. - // - Income and returned collateral are paid to this address. - // - This address is also allowed to change the worker address for the miner. - Owner addr.Address // Must be an ID-address. - - // Worker account for this miner. - // The associated pubkey-type address is used to sign blocks and messages on behalf of this miner. - Worker addr.Address // Must be an ID-address. - - // Additional addresses that are permitted to submit messages controlling this actor (optional). - ControlAddresses []addr.Address // Must all be ID addresses. - - PendingWorkerKey *WorkerKeyChange - - // Byte array representing a Libp2p identity that should be used when connecting to this miner. - PeerId abi.PeerID - - // Slice of byte arrays representing Libp2p multi-addresses used for establishing a connection with this miner. - Multiaddrs []abi.Multiaddrs - - // The proof type used for Window PoSt for this miner. - // A miner may commit sectors with different seal proof types (but compatible sector size and - // corresponding PoSt proof types). - WindowPoStProofType abi.RegisteredPoStProof - - // Amount of space in each sector committed by this miner. - // This is computed from the proof type and represented here redundantly. - SectorSize abi.SectorSize - - // The number of sectors in each Window PoSt partition (proof). - // This is computed from the proof type and represented here redundantly. - WindowPoStPartitionSectors uint64 - - // The next epoch this miner is eligible for certain permissioned actor methods - // and winning block elections as a result of being reported for a consensus fault. - ConsensusFaultElapsed abi.ChainEpoch - - // A proposed new owner account for this miner. - // Must be confirmed by a message from the pending address itself. 
- PendingOwnerAddress *addr.Address -} - -type WorkerKeyChange struct { - NewWorker addr.Address // Must be an ID address - EffectiveAt abi.ChainEpoch -} - -// Information provided by a miner when pre-committing a sector. -type SectorPreCommitInfo struct { - SealProof abi.RegisteredSealProof - SectorNumber abi.SectorNumber - SealedCID cid.Cid `checked:"true"` // CommR - SealRandEpoch abi.ChainEpoch - DealIDs []abi.DealID - Expiration abi.ChainEpoch - ReplaceCapacity bool // Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs) - // The committed capacity sector to replace, and it's deadline/partition location - ReplaceSectorDeadline uint64 - ReplaceSectorPartition uint64 - ReplaceSectorNumber abi.SectorNumber -} - -// Information stored on-chain for a pre-committed sector. -type SectorPreCommitOnChainInfo struct { - Info SectorPreCommitInfo - PreCommitDeposit abi.TokenAmount - PreCommitEpoch abi.ChainEpoch - DealWeight abi.DealWeight // Integral of active deals over sector lifetime - VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime -} - -// Information stored on-chain for a proven sector. -type SectorOnChainInfo struct { - SectorNumber abi.SectorNumber - SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s - SealedCID cid.Cid // CommR - DealIDs []abi.DealID - Activation abi.ChainEpoch // Epoch during which the sector proof was accepted - Expiration abi.ChainEpoch // Epoch during which the sector expires - DealWeight abi.DealWeight // Integral of active deals over sector lifetime - VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime - InitialPledge abi.TokenAmount // Pledge collected to commit this sector - ExpectedDayReward abi.TokenAmount // Expected one day projection of reward for sector computed at activation time - ExpectedStoragePledge abi.TokenAmount // Expected twenty day projection of reward for sector computed at activation time - ReplacedSectorAge abi.ChainEpoch // Age of sector this sector replaced or zero - ReplacedDayReward abi.TokenAmount // Day reward of sector this sector replace or zero - SectorKeyCID *cid.Cid // The original SealedSectorCID, only gets set on the first ReplicaUpdate -} - -func (st *State) GetInfo(store adt.Store) (*MinerInfo, error) { - var info MinerInfo - if err := store.Get(store.Context(), st.Info, &info); err != nil { - return nil, xerrors.Errorf("failed to get miner info %w", err) - } - return &info, nil -} - -// Returns deadline calculations for the state recorded proving period and deadline. 
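As a usage note for GetInfo above: MinerInfo is resolved from the state's Info CID and carries the miner's static configuration. A hedged in-package sketch, with the store and loaded state assumed to be supplied by the caller:

package miner // hypothetical sketch placed alongside miner_state.go

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
)

// describeMiner prints the static configuration held in MinerInfo.
func describeMiner(store adt.Store, st *State) error {
	info, err := st.GetInfo(store)
	if err != nil {
		return err
	}
	fmt.Printf("owner=%s worker=%s wpost-proof=%d sector-size=%d partition-width=%d\n",
		info.Owner, info.Worker, info.WindowPoStProofType, info.SectorSize, info.WindowPoStPartitionSectors)
	return nil
}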
This is out of date if the a -// miner does not have an active miner cron -func (st *State) RecordedDeadlineInfo(currEpoch abi.ChainEpoch) *dline.Info { - return NewDeadlineInfo(st.ProvingPeriodStart, st.CurrentDeadline, currEpoch) -} - -// Returns deadline calculations for the current (according to state) proving period -func (st *State) QuantSpecForDeadline(dlIdx uint64) builtin.QuantSpec { - return QuantSpecForDeadline(NewDeadlineInfo(st.ProvingPeriodStart, dlIdx, 0)) -} - -func (st *State) GetPrecommittedSector(store adt.Store, sectorNo abi.SectorNumber) (*SectorPreCommitOnChainInfo, bool, error) { - precommitted, err := adt.AsMap(store, st.PreCommittedSectors, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, false, err - } - - var info SectorPreCommitOnChainInfo - found, err := precommitted.Get(SectorKey(sectorNo), &info) - if err != nil { - return nil, false, xerrors.Errorf("failed to load precommitment for %v: %w", sectorNo, err) - } - return &info, found, nil -} - -func (st *State) GetSector(store adt.Store, sectorNo abi.SectorNumber) (*SectorOnChainInfo, bool, error) { - sectors, err := LoadSectors(store, st.Sectors) - if err != nil { - return nil, false, err - } - - return sectors.Get(sectorNo) -} - -func (st *State) FindSector(store adt.Store, sno abi.SectorNumber) (uint64, uint64, error) { - deadlines, err := st.LoadDeadlines(store) - if err != nil { - return 0, 0, err - } - return FindSector(store, deadlines, sno) -} - -func (st *State) LoadDeadlines(store adt.Store) (*Deadlines, error) { - var deadlines Deadlines - if err := store.Get(store.Context(), st.Deadlines, &deadlines); err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to load deadlines (%s): %w", st.Deadlines, err) - } - - return &deadlines, nil -} - -func (st *State) SaveDeadlines(store adt.Store, deadlines *Deadlines) error { - c, err := store.Put(store.Context(), deadlines) - if err != nil { - return err - } - st.Deadlines = c - return nil -} - -// LoadVestingFunds loads the vesting funds table from the store -func (st *State) LoadVestingFunds(store adt.Store) (*VestingFunds, error) { - var funds VestingFunds - if err := store.Get(store.Context(), st.VestingFunds, &funds); err != nil { - return nil, xerrors.Errorf("failed to load vesting funds (%s): %w", st.VestingFunds, err) - } - - return &funds, nil -} - -// CheckVestedFunds returns the amount of vested funds that have vested before the provided epoch. -func (st *State) CheckVestedFunds(store adt.Store, currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { - vestingFunds, err := st.LoadVestingFunds(store) - if err != nil { - return big.Zero(), xerrors.Errorf("failed to load vesting funds: %w", err) - } - - amountVested := abi.NewTokenAmount(0) - - for i := range vestingFunds.Funds { - vf := vestingFunds.Funds[i] - epoch := vf.Epoch - amount := vf.Amount - - if epoch >= currEpoch { - break - } - - amountVested = big.Add(amountVested, amount) - } - - return amountVested, nil -} - -// Unclaimed funds that are not locked -- includes free funds and does not -// account for fee debt. Always greater than or equal to zero -func (st *State) GetUnlockedBalance(actorBalance abi.TokenAmount) (abi.TokenAmount, error) { - unlockedBalance := big.Subtract(actorBalance, st.LockedFunds, st.PreCommitDeposits, st.InitialPledge) - if unlockedBalance.LessThan(big.Zero()) { - return big.Zero(), xerrors.Errorf("negative unlocked balance %v", unlockedBalance) - } - return unlockedBalance, nil -} - -// Unclaimed funds. 
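The vesting and balance helpers above combine naturally when deciding what a miner can currently spend. A hedged in-package sketch; actorBalance and currEpoch are assumed inputs from the caller:

package miner // hypothetical sketch placed alongside miner_state.go

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
)

// spendableAt reports the rewards that have vested before currEpoch and the
// unlocked part of the actor balance (balance minus locked funds, pre-commit
// deposits and initial pledge), per CheckVestedFunds and GetUnlockedBalance.
func spendableAt(store adt.Store, st *State, actorBalance abi.TokenAmount, currEpoch abi.ChainEpoch) error {
	vested, err := st.CheckVestedFunds(store, currEpoch)
	if err != nil {
		return err
	}
	unlocked, err := st.GetUnlockedBalance(actorBalance)
	if err != nil {
		return err
	}
	fmt.Printf("vested=%s unlocked=%s\n", vested, unlocked)
	return nil
}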
Actor balance - (locked funds, precommit deposit, initial pledge, fee debt) -// Can go negative if the miner is in IP debt -func (st *State) GetAvailableBalance(actorBalance abi.TokenAmount) (abi.TokenAmount, error) { - unlockedBalance, err := st.GetUnlockedBalance(actorBalance) - if err != nil { - return big.Zero(), err - } - return big.Subtract(unlockedBalance, st.FeeDebt), nil -} - -// -// Misc helpers -// - -func SectorKey(e abi.SectorNumber) abi.Keyer { - return abi.UIntKey(uint64(e)) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_types.go deleted file mode 100644 index 8f401a7..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/miner_types.go +++ /dev/null @@ -1,317 +0,0 @@ -package miner - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/power" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" - xc "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-state-types/proof" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -type DeclareFaultsRecoveredParams struct { - Recoveries []RecoveryDeclaration -} - -type RecoveryDeclaration struct { - // The deadline to which the recovered sectors are assigned, in range [0..WPoStPeriodDeadlines) - Deadline uint64 - // Partition index within the deadline containing the recovered sectors. - Partition uint64 - // Sectors in the partition being declared recovered. - Sectors bitfield.BitField -} - -type DeclareFaultsParams struct { - Faults []FaultDeclaration -} - -type FaultDeclaration struct { - // The deadline to which the faulty sectors are assigned, in range [0..WPoStPeriodDeadlines) - Deadline uint64 - // Partition index within the deadline containing the faulty sectors. - Partition uint64 - // Sectors in the partition being declared faulty. - Sectors bitfield.BitField -} - -type ReplicaUpdate struct { - SectorID abi.SectorNumber - Deadline uint64 - Partition uint64 - NewSealedSectorCID cid.Cid `checked:"true"` - Deals []abi.DealID - UpdateProofType abi.RegisteredUpdateProof - ReplicaProof []byte -} - -type ProveReplicaUpdatesParams struct { - Updates []ReplicaUpdate -} - -type PoStPartition struct { - // Partitions are numbered per-deadline, from zero. - Index uint64 - // Sectors skipped while proving that weren't already declared faulty - Skipped bitfield.BitField -} - -// Information submitted by a miner to provide a Window PoSt. -type SubmitWindowedPoStParams struct { - // The deadline index which the submission targets. - Deadline uint64 - // The partitions being proven. - Partitions []PoStPartition - // Array of proofs, one per distinct registered proof type present in the sectors being proven. - // In the usual case of a single proof type, this array will always have a single element (independent of number of partitions). - Proofs []proof.PoStProof - // The epoch at which these proofs is being committed to a particular chain. 
- ChainCommitEpoch abi.ChainEpoch - // The ticket randomness on the chain at the ChainCommitEpoch on the chain this post is committed to - ChainCommitRand abi.Randomness -} - -type DisputeWindowedPoStParams struct { - Deadline uint64 - PoStIndex uint64 // only one is allowed at a time to avoid loading too many sector infos. -} - -type ProveCommitAggregateParams struct { - SectorNumbers bitfield.BitField - AggregateProof []byte -} - -type ProveCommitSectorParams struct { - SectorNumber abi.SectorNumber - Proof []byte -} - -type MinerConstructorParams = power.MinerConstructorParams - -type TerminateSectorsParams struct { - Terminations []TerminationDeclaration -} - -type TerminationDeclaration struct { - Deadline uint64 - Partition uint64 - Sectors bitfield.BitField -} - -type TerminateSectorsReturn struct { - // Set to true if all early termination work has been completed. When - // false, the miner may choose to repeatedly invoke TerminateSectors - // with no new sectors to process the remainder of the pending - // terminations. While pending terminations are outstanding, the miner - // will not be able to withdraw funds. - Done bool -} - -type ChangePeerIDParams struct { - NewID abi.PeerID -} - -type ChangeMultiaddrsParams struct { - NewMultiaddrs []abi.Multiaddrs -} - -type ChangeWorkerAddressParams struct { - NewWorker addr.Address - NewControlAddrs []addr.Address -} - -type ExtendSectorExpirationParams struct { - Extensions []ExpirationExtension -} - -type ExpirationExtension struct { - Deadline uint64 - Partition uint64 - Sectors bitfield.BitField - NewExpiration abi.ChainEpoch -} - -type ReportConsensusFaultParams struct { - BlockHeader1 []byte - BlockHeader2 []byte - BlockHeaderExtra []byte -} - -type GetControlAddressesReturn struct { - Owner addr.Address - Worker addr.Address - ControlAddrs []addr.Address -} - -type CheckSectorProvenParams struct { - SectorNumber abi.SectorNumber -} - -type WithdrawBalanceParams struct { - AmountRequested abi.TokenAmount -} - -type CompactPartitionsParams struct { - Deadline uint64 - Partitions bitfield.BitField -} - -type CompactSectorNumbersParams struct { - MaskSectorNumbers bitfield.BitField -} - -type CronEventType int64 - -const ( - CronEventWorkerKeyChange CronEventType = iota - CronEventProvingDeadline - CronEventProcessEarlyTerminations -) - -type CronEventPayload struct { - EventType CronEventType -} - -// Identifier for a single partition within a miner. -type PartitionKey struct { - Deadline uint64 - Partition uint64 -} - -type PreCommitSectorBatchParams struct { - Sectors []SectorPreCommitInfo -} - -type PreCommitSectorParams struct { - SealProof abi.RegisteredSealProof - SectorNumber abi.SectorNumber - SealedCID cid.Cid `checked:"true"` // CommR - SealRandEpoch abi.ChainEpoch - DealIDs []abi.DealID - Expiration abi.ChainEpoch - ReplaceCapacity bool // DEPRECATED: Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs) - // DEPRECATED: The committed capacity sector to replace, and it's deadline/partition location - ReplaceSectorDeadline uint64 - ReplaceSectorPartition uint64 - ReplaceSectorNumber abi.SectorNumber -} - -// ExpirationSet is a collection of sector numbers that are expiring, either due to -// expected "on-time" expiration at the end of their life, or unexpected "early" termination -// due to being faulty for too long consecutively. 
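For the Window PoSt parameter types above, a hedged construction sketch: one partition, nothing skipped, a single proof entry. The proof type constant is purely illustrative; the proof bytes, commit epoch and randomness are assumed inputs supplied by the caller:

package miner // hypothetical sketch placed alongside miner_types.go

import (
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/proof"
)

// buildPoStSubmission assembles a minimal single-partition submission.
func buildPoStSubmission(deadline uint64, proofBytes []byte, commitEpoch abi.ChainEpoch, commitRand abi.Randomness) *SubmitWindowedPoStParams {
	return &SubmitWindowedPoStParams{
		Deadline:   deadline,
		Partitions: []PoStPartition{{Index: 0, Skipped: bitfield.New()}}, // no skipped sectors
		Proofs: []proof.PoStProof{{
			PoStProof:  abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, // illustrative proof type
			ProofBytes: proofBytes,
		}},
		ChainCommitEpoch: commitEpoch,
		ChainCommitRand:  commitRand,
	}
}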
-// Note that there is not a direct correspondence between on-time sectors and active power; -// a sector may be faulty but expiring on-time if it faults just prior to expected termination. -// Early sectors are always faulty, and active power always represents on-time sectors. -type ExpirationSet struct { - OnTimeSectors bitfield.BitField // Sectors expiring "on time" at the end of their committed life - EarlySectors bitfield.BitField // Sectors expiring "early" due to being faulty for too long - OnTimePledge abi.TokenAmount // Pledge total for the on-time sectors - ActivePower PowerPair // Power that is currently active (not faulty) - FaultyPower PowerPair // Power that is currently faulty -} - -// A queue of expiration sets by epoch, representing the on-time or early termination epoch for a collection of sectors. -// Wraps an AMT[ChainEpoch]*ExpirationSet. -// Keys in the queue are quantized (upwards), modulo some offset, to reduce the cardinality of keys. -type ExpirationQueue struct { - *adt.Array - quant builtin.QuantSpec -} - -// Loads a queue root. -// Epochs provided to subsequent method calls will be quantized upwards to quanta mod offsetSeed before being -// written to/read from queue entries. -func LoadExpirationQueue(store adt.Store, root cid.Cid, quant builtin.QuantSpec, bitwidth int) (ExpirationQueue, error) { - arr, err := adt.AsArray(store, root, bitwidth) - if err != nil { - return ExpirationQueue{}, xerrors.Errorf("failed to load epoch queue %v: %w", root, err) - } - return ExpirationQueue{arr, quant}, nil -} -func LoadSectors(store adt.Store, root cid.Cid) (Sectors, error) { - sectorsArr, err := adt.AsArray(store, root, SectorsAmtBitwidth) - if err != nil { - return Sectors{}, err - } - return Sectors{sectorsArr}, nil -} - -// Sectors is a helper type for accessing/modifying a miner's sectors. It's safe -// to pass this object around as needed. -type Sectors struct { - *adt.Array -} - -func (sa Sectors) Load(sectorNos bitfield.BitField) ([]*SectorOnChainInfo, error) { - var sectorInfos []*SectorOnChainInfo - if err := sectorNos.ForEach(func(i uint64) error { - var sectorOnChain SectorOnChainInfo - found, err := sa.Array.Get(i, §orOnChain) - if err != nil { - return xc.ErrIllegalState.Wrapf("failed to load sector %v: %w", abi.SectorNumber(i), err) - } else if !found { - return xc.ErrNotFound.Wrapf("can't find sector %d", i) - } - sectorInfos = append(sectorInfos, §orOnChain) - return nil - }); err != nil { - // Keep the underlying error code, unless the error was from - // traversing the bitfield. In that case, it's an illegal - // argument error. - return nil, xc.Unwrap(err, xc.ErrIllegalArgument).Wrapf("failed to load sectors: %w", err) - } - return sectorInfos, nil -} - -func (sa Sectors) Get(sectorNumber abi.SectorNumber) (info *SectorOnChainInfo, found bool, err error) { - var res SectorOnChainInfo - if found, err := sa.Array.Get(uint64(sectorNumber), &res); err != nil { - return nil, false, xerrors.Errorf("failed to get sector %d: %w", sectorNumber, err) - } else if !found { - return nil, false, nil - } - return &res, true, nil -} - -// VestingFunds represents the vesting table state for the miner. -// It is a slice of (VestingEpoch, VestingAmount). -// The slice will always be sorted by the VestingEpoch. -type VestingFunds struct { - Funds []VestingFund -} - -// VestingFund represents miner funds that will vest at the given epoch. 
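The Sectors helper above is the usual way to materialise SectorOnChainInfo records for a set of sector numbers. A hedged in-package sketch; the sectors root CID and the bitfield are assumed inputs (a bitfield could be built with, e.g., bitfield.NewFromSet):

package miner // hypothetical sketch placed alongside miner_types.go

import (
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
	"github.com/ipfs/go-cid"
)

// loadSectorInfos resolves on-chain info for every sector number in nos using
// LoadSectors and Sectors.Load above; missing numbers surface as not-found
// errors, per the Load implementation.
func loadSectorInfos(store adt.Store, sectorsRoot cid.Cid, nos bitfield.BitField) ([]*SectorOnChainInfo, error) {
	sectors, err := LoadSectors(store, sectorsRoot)
	if err != nil {
		return nil, err
	}
	return sectors.Load(nos)
}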
-type VestingFund struct { - Epoch abi.ChainEpoch - Amount abi.TokenAmount -} - -// ConstructVestingFunds constructs empty VestingFunds state. -func ConstructVestingFunds() *VestingFunds { - v := new(VestingFunds) - v.Funds = nil - return v -} - -type DeferredCronEventParams struct { - EventPayload []byte - RewardSmoothed smoothing.FilterEstimate - QualityAdjPowerSmoothed smoothing.FilterEstimate -} - -type ApplyRewardParams struct { - Reward abi.TokenAmount - Penalty abi.TokenAmount -} - -type ConfirmSectorProofsParams struct { - Sectors []abi.SectorNumber - RewardSmoothed smoothing.FilterEstimate - RewardBaselinePower abi.StoragePower - QualityAdjPowerSmoothed smoothing.FilterEstimate -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/monies.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/monies.go deleted file mode 100644 index b50a1c5..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/monies.go +++ /dev/null @@ -1,116 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/util/math" - "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" -) - -// Projection period of expected sector block reward for deposit required to pre-commit a sector. -// This deposit is lost if the pre-commitment is not timely followed up by a commitment proof. -var PreCommitDepositFactor = 20 // PARAM_SPEC -var PreCommitDepositProjectionPeriod = abi.ChainEpoch(PreCommitDepositFactor) * builtin.EpochsInDay - -// Projection period of expected sector block rewards for storage pledge required to commit a sector. -// This pledge is lost if a sector is terminated before its full committed lifetime. -var InitialPledgeFactor = 20 // PARAM_SPEC -var InitialPledgeProjectionPeriod = abi.ChainEpoch(InitialPledgeFactor) * builtin.EpochsInDay - -// Cap on initial pledge requirement for sectors. -// The target is 1 FIL (10**18 attoFIL) per 32GiB. -// This does not divide evenly, so the result is fractionally smaller. -var InitialPledgeMaxPerByte = big.Div(big.NewInt(1e18), big.NewInt(32<<30)) - -// Multiplier of share of circulating money supply for consensus pledge required to commit a sector. -// This pledge is lost if a sector is terminated before its full committed lifetime. -var InitialPledgeLockTarget = builtin.BigFrac{ - Numerator: big.NewInt(3), // PARAM_SPEC - Denominator: big.NewInt(10), -} - -// The projected block reward a sector would earn over some period. -// Also known as "BR(t)". 
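// Worked arithmetic (illustrative, not from the vendored file) for the
// InitialPledgeMaxPerByte comment above: 10^18 attoFIL / (32 << 30) bytes
// = 10^18 / 34,359,738,368 ≈ 29,103,830.46, which the integer division truncates
// to 29,103,830 attoFIL per byte. Scaled back up to a full 32 GiB sector that is
// 29,103,830 * 2^35 ≈ 0.999999984 FIL — the "fractionally smaller" cap the
// comment mentions.
func pledgeCapForSize(sizeBytes uint64) abi.TokenAmount {
	// The cap grows linearly with sector size, at just under 1 FIL per 32 GiB.
	return big.Mul(InitialPledgeMaxPerByte, big.NewIntUnsigned(sizeBytes))
}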
-// BR(t) = ProjectedRewardFraction(t) * SectorQualityAdjustedPower -// ProjectedRewardFraction(t) is the sum of estimated reward over estimated total power -// over all epochs in the projection period [t t+projectionDuration] -func ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { - networkQAPowerSmoothed := smoothing.Estimate(&networkQAPowerEstimate) - if networkQAPowerSmoothed.IsZero() { - return smoothing.Estimate(&rewardEstimate) - } - expectedRewardForProvingPeriod := smoothing.ExtrapolatedCumSumOfRatio(projectionDuration, 0, rewardEstimate, networkQAPowerEstimate) - br128 := big.Mul(qaSectorPower, expectedRewardForProvingPeriod) // Q.0 * Q.128 => Q.128 - br := big.Rsh(br128, math.Precision128) - - return big.Max(br, big.Zero()) -} - -// BR but zero values are clamped at 1 attofil -// Some uses of BR (PCD, IP) require a strictly positive value for BR derived values so -// accounting variables can be used as succinct indicators of miner activity. -func ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { - br := ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, projectionDuration) - if br.LessThanEqual(big.Zero()) { - br = abi.NewTokenAmount(1) - } - return br -} - -// Computes the PreCommit deposit given sector qa weight and current network conditions. -// PreCommit Deposit = BR(PreCommitDepositProjectionPeriod) -func PreCommitDepositForPower(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { - return ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate, qaSectorPower, PreCommitDepositProjectionPeriod) -} - -// Computes the pledge requirement for committing new quality-adjusted power to the network, given the current -// network total and baseline power, per-epoch reward, and circulating token supply. 
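// Usage sketch for the helpers above (illustrative, not part of the vendored file).
// The reward and power FilterEstimates would normally be read from the reward and
// power actors; the flat estimates below are made-up placeholders for illustration.
func examplePreCommitDeposit() abi.TokenAmount {
	rewardEstimate := smoothing.NewEstimate(big.NewInt(1_000_000_000_000_000_000), big.Zero()) // ~1 FIL of reward per epoch, no trend
	powerEstimate := smoothing.NewEstimate(big.Lsh(big.NewInt(10), 60), big.Zero())            // ~10 EiB of network QA power, no trend
	qaSectorPower := abi.NewStoragePower(32 << 30)                                             // a single 32 GiB sector at quality 1
	// PCD = BR(PreCommitDepositProjectionPeriod), clamped below at 1 attoFIL.
	return PreCommitDepositForPower(rewardEstimate, powerEstimate, qaSectorPower)
}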
-// The pledge comprises two parts: -// - storage pledge, aka IP base: a multiple of the reward expected to be earned by newly-committed power -// - consensus pledge, aka additional IP: a pro-rata fraction of the circulating money supply -// -// IP = IPBase(t) + AdditionalIP(t) -// IPBase(t) = BR(t, InitialPledgeProjectionPeriod) -// AdditionalIP(t) = LockTarget(t)*PledgeShare(t) -// LockTarget = (LockTargetFactorNum / LockTargetFactorDenom) * FILCirculatingSupply(t) -// PledgeShare(t) = sectorQAPower / max(BaselinePower(t), NetworkQAPower(t)) -func InitialPledgeForPower(qaPower, baselinePower abi.StoragePower, rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, circulatingSupply abi.TokenAmount) abi.TokenAmount { - ipBase := ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate, qaPower, InitialPledgeProjectionPeriod) - - lockTargetNum := big.Mul(InitialPledgeLockTarget.Numerator, circulatingSupply) - lockTargetDenom := InitialPledgeLockTarget.Denominator - pledgeShareNum := qaPower - networkQAPower := smoothing.Estimate(&networkQAPowerEstimate) - pledgeShareDenom := big.Max(big.Max(networkQAPower, baselinePower), qaPower) // use qaPower in case others are 0 - additionalIPNum := big.Mul(lockTargetNum, pledgeShareNum) - additionalIPDenom := big.Mul(lockTargetDenom, pledgeShareDenom) - additionalIP := big.Div(additionalIPNum, additionalIPDenom) - - nominalPledge := big.Add(ipBase, additionalIP) - spaceRacePledgeCap := big.Mul(InitialPledgeMaxPerByte, qaPower) - return big.Min(nominalPledge, spaceRacePledgeCap) -} - -var EstimatedSingleProveCommitGasUsage = big.NewInt(49299973) // PARAM_SPEC -var EstimatedSinglePreCommitGasUsage = big.NewInt(16433324) // PARAM_SPEC -var BatchDiscount = builtin.BigFrac{ // PARAM_SPEC - Numerator: big.NewInt(1), - Denominator: big.NewInt(20), -} -var BatchBalancer = big.Mul(big.NewInt(5), builtin.OneNanoFIL) // PARAM_SPEC - -func AggregateProveCommitNetworkFee(aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { - return aggregateNetworkFee(aggregateSize, EstimatedSingleProveCommitGasUsage, baseFee) -} - -func AggregatePreCommitNetworkFee(aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { - return aggregateNetworkFee(aggregateSize, EstimatedSinglePreCommitGasUsage, baseFee) -} - -func aggregateNetworkFee(aggregateSize int, gasUsage big.Int, baseFee abi.TokenAmount) abi.TokenAmount { - effectiveGasFee := big.Max(baseFee, BatchBalancer) - networkFeeNum := big.Product(effectiveGasFee, gasUsage, big.NewInt(int64(aggregateSize)), BatchDiscount.Numerator) - networkFee := big.Div(networkFeeNum, BatchDiscount.Denominator) - return networkFee -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/partition_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/partition_state.go deleted file mode 100644 index df94d3a..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/partition_state.go +++ /dev/null @@ -1,116 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -type Partition struct { - // Sector numbers in this partition, including faulty, unproven, and terminated sectors. - Sectors bitfield.BitField - // Unproven sectors in this partition. 
This bitfield will be cleared on - // a successful window post (or at the end of the partition's next - // deadline). At that time, any still unproven sectors will be added to - // the faulty sector bitfield. - Unproven bitfield.BitField - // Subset of sectors detected/declared faulty and not yet recovered (excl. from PoSt). - // Faults ∩ Terminated = ∅ - Faults bitfield.BitField - // Subset of faulty sectors expected to recover on next PoSt - // Recoveries ∩ Terminated = ∅ - Recoveries bitfield.BitField - // Subset of sectors terminated but not yet removed from partition (excl. from PoSt) - Terminated bitfield.BitField - // Maps epochs sectors that expire in or before that epoch. - // An expiration may be an "on-time" scheduled expiration, or early "faulty" expiration. - // Keys are quantized to last-in-deadline epochs. - ExpirationsEpochs cid.Cid // AMT[ChainEpoch]ExpirationSet - // Subset of terminated that were before their committed expiration epoch, by termination epoch. - // Termination fees have not yet been calculated or paid and associated deals have not yet been - // canceled but effective power has already been adjusted. - // Not quantized. - EarlyTerminated cid.Cid // AMT[ChainEpoch]BitField - - // Power of not-yet-terminated sectors (incl faulty & unproven). - LivePower PowerPair - // Power of yet-to-be-proved sectors (never faulty). - UnprovenPower PowerPair - // Power of currently-faulty sectors. FaultyPower <= LivePower. - FaultyPower PowerPair - // Power of expected-to-recover sectors. RecoveringPower <= FaultyPower. - RecoveringPower PowerPair -} - -// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. -const PartitionExpirationAmtBitwidth = 4 -const PartitionEarlyTerminationArrayAmtBitwidth = 3 - -// Value type for a pair of raw and QA power. -type PowerPair struct { - Raw abi.StoragePower - QA abi.StoragePower -} - -// Live sectors are those that are not terminated (but may be faulty). -func (p *Partition) LiveSectors() (bitfield.BitField, error) { - live, err := bitfield.SubtractBitField(p.Sectors, p.Terminated) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to compute live sectors: %w", err) - } - return live, nil - -} - -// Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power. -func (p *Partition) ActiveSectors() (bitfield.BitField, error) { - live, err := p.LiveSectors() - if err != nil { - return bitfield.BitField{}, err - } - nonFaulty, err := bitfield.SubtractBitField(live, p.Faults) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to compute active sectors: %w", err) - } - active, err := bitfield.SubtractBitField(nonFaulty, p.Unproven) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to compute active sectors: %w", err) - } - return active, err -} - -// Activates unproven sectors, returning the activated power. 
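// Illustrative sketch (not part of the vendored file) of the set relationships
// documented on the fields above: Live = Sectors \ Terminated and
// Active = Live \ Faults \ Unproven.
func exampleActiveSectorCount() (uint64, error) {
	p := Partition{
		Sectors:    bitfield.NewFromSet([]uint64{1, 2, 3, 4}),
		Unproven:   bitfield.NewFromSet([]uint64{4}),
		Faults:     bitfield.NewFromSet([]uint64{3}),
		Recoveries: bitfield.New(),
		Terminated: bitfield.NewFromSet([]uint64{2}),
	}
	// Sector 2 is terminated, 3 is faulty and 4 is unproven, so only sector 1 is active.
	active, err := p.ActiveSectors()
	if err != nil {
		return 0, err
	}
	return active.Count() // 1
}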
-func (p *Partition) ActivateUnproven() PowerPair { - newPower := p.UnprovenPower - p.UnprovenPower = NewPowerPairZero() - p.Unproven = bitfield.New() - return newPower -} - -// -// PowerPair -// - -func NewPowerPairZero() PowerPair { - return NewPowerPair(big.Zero(), big.Zero()) -} - -func NewPowerPair(raw, qa abi.StoragePower) PowerPair { - return PowerPair{Raw: raw, QA: qa} -} - -func (pp PowerPair) Add(other PowerPair) PowerPair { - return PowerPair{ - Raw: big.Add(pp.Raw, other.Raw), - QA: big.Add(pp.QA, other.QA), - } -} - -func (pp PowerPair) Sub(other PowerPair) PowerPair { - return PowerPair{ - Raw: big.Sub(pp.Raw, other.Raw), - QA: big.Sub(pp.QA, other.QA), - } -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/policy.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/policy.go deleted file mode 100644 index f5e2c15..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/miner/policy.go +++ /dev/null @@ -1,176 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-state-types/builtin" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" -) - -// The period over which a miner's active sectors are expected to be proven via WindowPoSt. -// This guarantees that (1) user data is proven daily, (2) user data is stored for 24h by a rational miner -// (due to Window PoSt cost assumption). -var WPoStProvingPeriod = abi.ChainEpoch(builtin.EpochsInDay) // 24 hours PARAM_SPEC - -// The period between the opening and the closing of a WindowPoSt deadline in which the miner is expected to -// provide a Window PoSt proof. -// This provides a miner enough time to compute and propagate a Window PoSt proof. -var WPoStChallengeWindow = abi.ChainEpoch(30 * 60 / builtin.EpochDurationSeconds) // 30 minutes (48 per day) PARAM_SPEC - -// WPoStDisputeWindow is the period after a challenge window ends during which -// PoSts submitted during that period may be disputed. -var WPoStDisputeWindow = 2 * ChainFinality // PARAM_SPEC - -// The number of non-overlapping PoSt deadlines in a proving period. -// This spreads a miner's Window PoSt work across a proving period. -const WPoStPeriodDeadlines = uint64(48) // PARAM_SPEC - -// MaxPartitionsPerDeadline is the maximum number of partitions that will be assigned to a deadline. -// For a minimum storage of upto 1Eib, we need 300 partitions per deadline. -// 48 * 32GiB * 2349 * 300 = 1.00808144 EiB -// So, to support upto 10Eib storage, we set this to 3000. -const MaxPartitionsPerDeadline = 3000 - -// The maximum number of partitions that can be loaded in a single invocation. -// This limits the number of simultaneous fault, recovery, or sector-extension declarations. -// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline. -const AddressedPartitionsMax = MaxPartitionsPerDeadline - -// Maximum number of unique "declarations" in batch operations. -const DeclarationsMax = AddressedPartitionsMax - -// The maximum number of sector infos that can be loaded in a single invocation. -// This limits the amount of state to be read in a single message execution. 
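// Worked check (illustrative, not from the vendored file) of the
// MaxPartitionsPerDeadline figure above, taking 2349 as the Window PoSt partition
// size (sectors per partition) for the 32 GiB proof type — an assumption used only
// for this arithmetic, not a constant defined in this file:
//
//   48 deadlines * 300 partitions * 2349 sectors = 33,825,600 sectors
//   33,825,600 sectors * 32 GiB                  = 1,082,419,200 GiB
//   1,082,419,200 GiB / 2^30 (GiB per EiB)       ≈ 1.00808 EiB
//
// so 300 partitions per deadline covers roughly 1 EiB of committed storage, and the
// chosen limit of 3000 covers roughly 10 EiB.
const assumedWindowPoStPartitionSectors32GiB = 2349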
-const AddressedSectorsMax = 25_000 // PARAM_SPEC - -// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible) -// This is a conservative value that is chosen via simulations of all known attacks. -const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC - -// Prefix for sealed sector CIDs (CommR). -var SealedCIDPrefix = cid.Prefix{ - Version: 1, - Codec: cid.FilCommitmentSealed, - MhType: mh.POSEIDON_BLS12_381_A1_FC1, - MhLength: 32, -} - -// List of proof types which may be used when creating a new miner actor. -// This is mutable to allow configuration of testing and development networks. -var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{ - abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {}, - abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {}, -} - -// Maximum delay to allow between sector pre-commit and subsequent proof. -// The allowable delay depends on seal proof algorithm. -var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{ - abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC - abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - - abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC - abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, -} - -// The maximum number of sector pre-commitments in a single batch. -// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year. -const PreCommitSectorBatchMaxSize = 256 - -// The maximum number of sector replica updates in a single batch. -// Same as PreCommitSectorBatchMaxSize for consistency -const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize - -// Maximum delay between challenge and pre-commitment. -// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a -// particular chain. -var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC - -// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn. -// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and -// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge. -var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC - -// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed. -// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a -// miner to wait for chain stability during the challenge window. 
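// Sketch (illustrative, not part of the vendored file) of how these policy values
// are typically consulted when checking a pre-commitment: the sealed CID must carry
// the CommR prefix, and the proof must land within the per-proof-type prove-commit
// window. It assumes "fmt" for error construction, which this file does not
// otherwise import.
func checkPreCommitPolicy(sealedCID cid.Cid, proofType abi.RegisteredSealProof, preCommitEpoch, proveCommitEpoch abi.ChainEpoch) error {
	if sealedCID.Prefix() != SealedCIDPrefix {
		return fmt.Errorf("sealed CID does not have the CommR prefix")
	}
	maxDelay, ok := MaxProveCommitDuration[proofType]
	if !ok {
		return fmt.Errorf("unsupported seal proof type %d", proofType)
	}
	if proveCommitEpoch > preCommitEpoch+maxDelay {
		return fmt.Errorf("prove-commit arrived more than %d epochs after pre-commit", maxDelay)
	}
	return nil
}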
-// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions). -const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC - -// Minimum period between fault declaration and the next deadline opening. -// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff, -// the fault declaration is considered invalid for that deadline. -// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges. -const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC - -// The maximum age of a fault before the sector is terminated. -// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral. -var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC - -// Staging period for a miner worker key change. -// This delay prevents a miner choosing a more favorable worker key that wins leader elections. -const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC - -// Minimum number of epochs past the current epoch a sector may be set to expire. -const MinSectorExpiration = 180 * builtin.EpochsInDay // PARAM_SPEC - -// The maximum number of epochs past the current epoch that sector lifetime may be extended. -// A sector may be extended multiple times, however, the total maximum lifetime is also bounded by -// the associated seal proof's maximum lifetime. -const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay // PARAM_SPEC - -// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector. -// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector. -// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier. -// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier. -// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier. -// SectorQuality of a sector is a weighted average of multipliers based on their proportions. 
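// Worked example (illustrative, not from the vendored file) of the weighting
// described above, which QualityForWeight below implements. With the builtin policy
// multipliers (QualityBaseMultiplier = 10, DealWeightMultiplier = 10,
// VerifiedDealWeightMultiplier = 100, matching the *10 and *100 factors noted in
// the function's inline comments):
//
//   no deals:                  10 / 10                 = 1x
//   full of regular deals:     10 / 10                 = 1x
//   full of verified deals:   100 / 10                 = 10x
//   half verified, half idle: (0.5*10 + 0.5*100) / 10  = 5.5x
//
// The returned SectorQuality is in Q.20 fixed point, i.e. shifted left by
// SectorQualityPrecision, so "10x" comes back as 10 << 20.
func exampleFullyVerifiedQuality(size abi.SectorSize, duration abi.ChainEpoch) abi.SectorQuality {
	// All of the sector's spacetime is occupied by verified deals.
	verifiedWeight := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration)))
	return QualityForWeight(size, duration, big.Zero(), verifiedWeight) // 10 << SectorQualityPrecision
}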
-func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality { - // sectorSpaceTime = size * duration - sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration))) - // totalDealSpaceTime = dealWeight + verifiedWeight - totalDealSpaceTime := big.Add(dealWeight, verifiedWeight) - - // Base - all size * duration of non-deals - // weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier - weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier) - // Deal - all deal size * deal duration * 10 - // weightedDealSpaceTime = dealWeight * DealWeightMultiplier - weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier) - // Verified - all verified deal size * verified deal duration * 100 - // weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier - weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier) - // Sum - sum of all spacetime - // weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime - weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime) - // scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20 - scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision) - - // Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10) - return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier) -} - -// The power for a sector size, committed duration, and weight. -func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - quality := QualityForWeight(size, duration, dealWeight, verifiedWeight) - return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision) -} - -const MaxAggregatedSectors = 819 -const MinAggregatedSectors = 4 -const MaxAggregateProofSize = 81960 - -// Specification for a linear vesting schedule. -type VestSpec struct { - InitialDelay abi.ChainEpoch // Delay before any amount starts vesting. - VestPeriod abi.ChainEpoch // Period over which the total should vest, after the initial delay. - StepDuration abi.ChainEpoch // Duration between successive incremental vests (independent of vesting period). - Quantization abi.ChainEpoch // Maximum precision of vesting table (limits cardinality of table). -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/cbor_gen.go deleted file mode 100644 index 666d51b..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/cbor_gen.go +++ /dev/null @@ -1,1225 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package power - -import ( - "fmt" - "io" - - address "github.com/filecoin-project/go-address" - abi "github.com/filecoin-project/go-state-types/abi" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufState = []byte{143} - -func (t *State) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.TotalRawBytePower (big.Int) (struct) - if err := t.TotalRawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalBytesCommitted (big.Int) (struct) - if err := t.TotalBytesCommitted.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalQualityAdjPower (big.Int) (struct) - if err := t.TotalQualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalQABytesCommitted (big.Int) (struct) - if err := t.TotalQABytesCommitted.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalPledgeCollateral (big.Int) (struct) - if err := t.TotalPledgeCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochRawBytePower (big.Int) (struct) - if err := t.ThisEpochRawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochQualityAdjPower (big.Int) (struct) - if err := t.ThisEpochQualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochPledgeCollateral (big.Int) (struct) - if err := t.ThisEpochPledgeCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.ThisEpochQAPowerSmoothed.MarshalCBOR(w); err != nil { - return err - } - - // t.MinerCount (int64) (int64) - if t.MinerCount >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinerCount)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinerCount-1)); err != nil { - return err - } - } - - // t.MinerAboveMinPowerCount (int64) (int64) - if t.MinerAboveMinPowerCount >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinerAboveMinPowerCount)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinerAboveMinPowerCount-1)); err != nil { - return err - } - } - - // t.CronEventQueue (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.CronEventQueue); err != nil { - return xerrors.Errorf("failed to write cid field t.CronEventQueue: %w", err) - } - - // t.FirstCronEpoch (abi.ChainEpoch) (int64) - if t.FirstCronEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.FirstCronEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.FirstCronEpoch-1)); err != nil { - return err - } - } - - // t.Claims (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Claims); err != nil { - return xerrors.Errorf("failed to write cid field t.Claims: %w", err) - } - - // t.ProofValidationBatch (cid.Cid) (struct) - - if t.ProofValidationBatch == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.ProofValidationBatch); err != nil { - return xerrors.Errorf("failed to write cid field t.ProofValidationBatch: %w", err) - } - } - - return nil -} - -func (t *State) UnmarshalCBOR(r 
io.Reader) error { - *t = State{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 15 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.TotalRawBytePower (big.Int) (struct) - - { - - if err := t.TotalRawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalRawBytePower: %w", err) - } - - } - // t.TotalBytesCommitted (big.Int) (struct) - - { - - if err := t.TotalBytesCommitted.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalBytesCommitted: %w", err) - } - - } - // t.TotalQualityAdjPower (big.Int) (struct) - - { - - if err := t.TotalQualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalQualityAdjPower: %w", err) - } - - } - // t.TotalQABytesCommitted (big.Int) (struct) - - { - - if err := t.TotalQABytesCommitted.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalQABytesCommitted: %w", err) - } - - } - // t.TotalPledgeCollateral (big.Int) (struct) - - { - - if err := t.TotalPledgeCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalPledgeCollateral: %w", err) - } - - } - // t.ThisEpochRawBytePower (big.Int) (struct) - - { - - if err := t.ThisEpochRawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochRawBytePower: %w", err) - } - - } - // t.ThisEpochQualityAdjPower (big.Int) (struct) - - { - - if err := t.ThisEpochQualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochQualityAdjPower: %w", err) - } - - } - // t.ThisEpochPledgeCollateral (big.Int) (struct) - - { - - if err := t.ThisEpochPledgeCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochPledgeCollateral: %w", err) - } - - } - // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) - - { - - if err := t.ThisEpochQAPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochQAPowerSmoothed: %w", err) - } - - } - // t.MinerCount (int64) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.MinerCount = int64(extraI) - } - // t.MinerAboveMinPowerCount (int64) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.MinerAboveMinPowerCount = int64(extraI) - } - // t.CronEventQueue (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.CronEventQueue: %w", err) - } - - t.CronEventQueue = c - - } 
- // t.FirstCronEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.FirstCronEpoch = abi.ChainEpoch(extraI) - } - // t.Claims (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Claims: %w", err) - } - - t.Claims = c - - } - // t.ProofValidationBatch (cid.Cid) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProofValidationBatch: %w", err) - } - - t.ProofValidationBatch = &c - } - - } - return nil -} - -var lengthBufClaim = []byte{131} - -func (t *Claim) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufClaim); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - if t.WindowPoStProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil { - return err - } - } - - // t.RawBytePower (big.Int) (struct) - if err := t.RawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjPower (big.Int) (struct) - if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Claim) UnmarshalCBOR(r io.Reader) error { - *t = Claim{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.WindowPoStProofType = abi.RegisteredPoStProof(extraI) - } - // t.RawBytePower (big.Int) (struct) - - { - - if err := t.RawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) - } - - } - // t.QualityAdjPower (big.Int) (struct) - - { - - if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) - } - - } - return nil -} - -var lengthBufUpdateClaimedPowerParams = []byte{130} - -func (t *UpdateClaimedPowerParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := 
w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufUpdateClaimedPowerParams); err != nil { - return err - } - - // t.RawByteDelta (big.Int) (struct) - if err := t.RawByteDelta.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjustedDelta (big.Int) (struct) - if err := t.QualityAdjustedDelta.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *UpdateClaimedPowerParams) UnmarshalCBOR(r io.Reader) error { - *t = UpdateClaimedPowerParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.RawByteDelta (big.Int) (struct) - - { - - if err := t.RawByteDelta.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RawByteDelta: %w", err) - } - - } - // t.QualityAdjustedDelta (big.Int) (struct) - - { - - if err := t.QualityAdjustedDelta.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjustedDelta: %w", err) - } - - } - return nil -} - -var lengthBufMinerConstructorParams = []byte{134} - -func (t *MinerConstructorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufMinerConstructorParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.OwnerAddr (address.Address) (struct) - if err := t.OwnerAddr.MarshalCBOR(w); err != nil { - return err - } - - // t.WorkerAddr (address.Address) (struct) - if err := t.WorkerAddr.MarshalCBOR(w); err != nil { - return err - } - - // t.ControlAddrs ([]address.Address) (slice) - if len(t.ControlAddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ControlAddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddrs))); err != nil { - return err - } - for _, v := range t.ControlAddrs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - if t.WindowPoStProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil { - return err - } - } - - // t.PeerId ([]uint8) (slice) - if len(t.PeerId) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.PeerId was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.PeerId))); err != nil { - return err - } - - if _, err := w.Write(t.PeerId[:]); err != nil { - return err - } - - // t.Multiaddrs ([][]uint8) (slice) - if len(t.Multiaddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Multiaddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { - return err - } - for _, v := range t.Multiaddrs { - if len(v) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { - return err - } - - if _, err := w.Write(v[:]); err != nil { - return err - } - } - return nil -} - -func (t *MinerConstructorParams) 
UnmarshalCBOR(r io.Reader) error { - *t = MinerConstructorParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 6 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.OwnerAddr (address.Address) (struct) - - { - - if err := t.OwnerAddr.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.OwnerAddr: %w", err) - } - - } - // t.WorkerAddr (address.Address) (struct) - - { - - if err := t.WorkerAddr.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.WorkerAddr: %w", err) - } - - } - // t.ControlAddrs ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ControlAddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ControlAddrs = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ControlAddrs[i] = v - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.WindowPoStProofType = abi.RegisteredPoStProof(extraI) - } - // t.PeerId ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.PeerId: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.PeerId = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.PeerId[:]); err != nil { - return err - } - // t.Multiaddrs ([][]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Multiaddrs = make([][]uint8, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Multiaddrs[i] = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { - return err - } - } - } - - return nil -} - -var lengthBufCreateMinerParams = []byte{133} - -func (t *CreateMinerParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCreateMinerParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - 
// t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - if t.WindowPoStProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil { - return err - } - } - - // t.Peer ([]uint8) (slice) - if len(t.Peer) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Peer was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Peer))); err != nil { - return err - } - - if _, err := w.Write(t.Peer[:]); err != nil { - return err - } - - // t.Multiaddrs ([][]uint8) (slice) - if len(t.Multiaddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Multiaddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { - return err - } - for _, v := range t.Multiaddrs { - if len(v) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { - return err - } - - if _, err := w.Write(v[:]); err != nil { - return err - } - } - return nil -} - -func (t *CreateMinerParams) UnmarshalCBOR(r io.Reader) error { - *t = CreateMinerParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Owner: %w", err) - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Worker: %w", err) - } - - } - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.WindowPoStProofType = abi.RegisteredPoStProof(extraI) - } - // t.Peer ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Peer: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Peer = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Peer[:]); err != nil { - return err - } - // t.Multiaddrs ([][]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Multiaddrs: array too large 
(%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Multiaddrs = make([][]uint8, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Multiaddrs[i] = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { - return err - } - } - } - - return nil -} - -var lengthBufCreateMinerReturn = []byte{130} - -func (t *CreateMinerReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCreateMinerReturn); err != nil { - return err - } - - // t.IDAddress (address.Address) (struct) - if err := t.IDAddress.MarshalCBOR(w); err != nil { - return err - } - - // t.RobustAddress (address.Address) (struct) - if err := t.RobustAddress.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CreateMinerReturn) UnmarshalCBOR(r io.Reader) error { - *t = CreateMinerReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.IDAddress (address.Address) (struct) - - { - - if err := t.IDAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.IDAddress: %w", err) - } - - } - // t.RobustAddress (address.Address) (struct) - - { - - if err := t.RobustAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RobustAddress: %w", err) - } - - } - return nil -} - -var lengthBufCurrentTotalPowerReturn = []byte{132} - -func (t *CurrentTotalPowerReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCurrentTotalPowerReturn); err != nil { - return err - } - - // t.RawBytePower (big.Int) (struct) - if err := t.RawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjPower (big.Int) (struct) - if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - - // t.PledgeCollateral (big.Int) (struct) - if err := t.PledgeCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.QualityAdjPowerSmoothed.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CurrentTotalPowerReturn) UnmarshalCBOR(r io.Reader) error { - *t = CurrentTotalPowerReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.RawBytePower (big.Int) (struct) - - { - - if err := t.RawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) - } - - } - // t.QualityAdjPower (big.Int) (struct) - - { - - if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { 
- return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) - } - - } - // t.PledgeCollateral (big.Int) (struct) - - { - - if err := t.PledgeCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PledgeCollateral: %w", err) - } - - } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) - - { - - if err := t.QualityAdjPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPowerSmoothed: %w", err) - } - - } - return nil -} - -var lengthBufEnrollCronEventParams = []byte{130} - -func (t *EnrollCronEventParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufEnrollCronEventParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.EventEpoch (abi.ChainEpoch) (int64) - if t.EventEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EventEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EventEpoch-1)); err != nil { - return err - } - } - - // t.Payload ([]uint8) (slice) - if len(t.Payload) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Payload was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Payload))); err != nil { - return err - } - - if _, err := w.Write(t.Payload[:]); err != nil { - return err - } - return nil -} - -func (t *EnrollCronEventParams) UnmarshalCBOR(r io.Reader) error { - *t = EnrollCronEventParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.EventEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EventEpoch = abi.ChainEpoch(extraI) - } - // t.Payload ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Payload: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Payload = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Payload[:]); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/methods.go deleted file mode 100644 index d5474a2..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/methods.go +++ /dev/null @@ -1,18 +0,0 @@ -package power - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/proof" -) - -var Methods = []interface{}{ - 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor - 2: 
*new(func(interface{}, *CreateMinerParams) *CreateMinerReturn), // CreateMiner - 3: *new(func(interface{}, *UpdateClaimedPowerParams) *abi.EmptyValue), // UpdateClaimedPower - 4: *new(func(interface{}, *EnrollCronEventParams) *abi.EmptyValue), // EnrollCronEvent - 5: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // CronTick - 6: *new(func(interface{}, *abi.TokenAmount) *abi.EmptyValue), // UpdatePledgeTotal - 7: nil, - 8: *new(func(interface{}, *proof.SealVerifyInfo) *abi.EmptyValue), // SubmitPoRepForBulkVerify - 9: *new(func(interface{}, *abi.EmptyValue) *CurrentTotalPowerReturn), // CurrentTotalPower -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_state.go deleted file mode 100644 index 590c86c..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_state.go +++ /dev/null @@ -1,165 +0,0 @@ -package power - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -// genesis power in bytes = 750,000 GiB -var InitialQAPowerEstimatePosition = big.Mul(big.NewInt(750_000), big.NewInt(1<<30)) - -// max chain throughput in bytes per epoch = 120 ProveCommits / epoch = 3,840 GiB -var InitialQAPowerEstimateVelocity = big.Mul(big.NewInt(3_840), big.NewInt(1<<30)) - -// Bitwidth of CronEventQueue HAMT determined empirically from mutation -// patterns and projections of mainnet data. -const CronQueueHamtBitwidth = 6 - -// Bitwidth of CronEventQueue AMT determined empirically from mutation -// patterns and projections of mainnet data. -const CronQueueAmtBitwidth = 6 - -// Bitwidth of ProofValidationBatch AMT determined empirically from mutation -// pattersn and projections of mainnet data. -const ProofValidationBatchAmtBitwidth = 4 - -// The number of miners that must meet the consensus minimum miner power before that minimum power is enforced -// as a condition of leader election. -// This ensures a network still functions before any miners reach that threshold. -const ConsensusMinerMinMiners = 4 // PARAM_SPEC - -type State struct { - TotalRawBytePower abi.StoragePower - // TotalBytesCommitted includes claims from miners below min power threshold - TotalBytesCommitted abi.StoragePower - TotalQualityAdjPower abi.StoragePower - // TotalQABytesCommitted includes claims from miners below min power threshold - TotalQABytesCommitted abi.StoragePower - TotalPledgeCollateral abi.TokenAmount - - // These fields are set once per epoch in the previous cron tick and used - // for consistent values across a single epoch's state transition. - ThisEpochRawBytePower abi.StoragePower - ThisEpochQualityAdjPower abi.StoragePower - ThisEpochPledgeCollateral abi.TokenAmount - ThisEpochQAPowerSmoothed smoothing.FilterEstimate - - MinerCount int64 - // Number of miners having proven the minimum consensus power. - MinerAboveMinPowerCount int64 - - // A queue of events to be triggered by cron, indexed by epoch. - CronEventQueue cid.Cid // Multimap, (HAMT[ChainEpoch]AMT[CronEvent]) - - // First epoch in which a cron task may be stored. 
- // Cron will iterate every epoch between this and the current epoch inclusively to find tasks to execute. - FirstCronEpoch abi.ChainEpoch - - // Claimed power for each miner. - Claims cid.Cid // Map, HAMT[address]Claim - - ProofValidationBatch *cid.Cid // Multimap, (HAMT[Address]AMT[SealVerifyInfo]) -} - -func ConstructState(store adt.Store) (*State, error) { - emptyClaimsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty map: %w", err) - } - emptyCronQueueMMapCid, err := adt.StoreEmptyMultimap(store, CronQueueHamtBitwidth, CronQueueAmtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty multimap: %w", err) - } - - return &State{ - TotalRawBytePower: abi.NewStoragePower(0), - TotalBytesCommitted: abi.NewStoragePower(0), - TotalQualityAdjPower: abi.NewStoragePower(0), - TotalQABytesCommitted: abi.NewStoragePower(0), - TotalPledgeCollateral: abi.NewTokenAmount(0), - ThisEpochRawBytePower: abi.NewStoragePower(0), - ThisEpochQualityAdjPower: abi.NewStoragePower(0), - ThisEpochPledgeCollateral: abi.NewTokenAmount(0), - ThisEpochQAPowerSmoothed: smoothing.NewEstimate(InitialQAPowerEstimatePosition, InitialQAPowerEstimateVelocity), - FirstCronEpoch: 0, - CronEventQueue: emptyCronQueueMMapCid, - Claims: emptyClaimsMapCid, - MinerCount: 0, - MinerAboveMinPowerCount: 0, - }, nil -} - -type Claim struct { - // Miner's proof type used to determine minimum miner size - WindowPoStProofType abi.RegisteredPoStProof - - // Sum of raw byte power for a miner's sectors. - RawBytePower abi.StoragePower - - // Sum of quality adjusted power for a miner's sectors. - QualityAdjPower abi.StoragePower -} - -// MinerNominalPowerMeetsConsensusMinimum is used to validate Election PoSt -// winners outside the chain state. If the miner has over a threshold of power -// the miner meets the minimum. If the network is a below a threshold of -// miners and has power > zero the miner meets the minimum. 
-func (st *State) MinerNominalPowerMeetsConsensusMinimum(s adt.Store, miner addr.Address) (bool, error) { //nolint:deadcode,unused - claims, err := adt.AsMap(s, st.Claims, builtin.DefaultHamtBitwidth) - if err != nil { - return false, xerrors.Errorf("failed to load claims: %w", err) - } - - claim, ok, err := getClaim(claims, miner) - if err != nil { - return false, err - } - if !ok { - return false, xerrors.Errorf("no claim for actor %w", miner) - } - - minerNominalPower := claim.RawBytePower - minerMinPower, err := builtin.ConsensusMinerMinPower(claim.WindowPoStProofType) - if err != nil { - return false, xerrors.Errorf("could not get miner min power from proof type: %w", err) - } - - // if miner is larger than min power requirement, we're set - if minerNominalPower.GreaterThanEqual(minerMinPower) { - return true, nil - } - - // otherwise, if ConsensusMinerMinMiners miners meet min power requirement, return false - if st.MinerAboveMinPowerCount >= ConsensusMinerMinMiners { - return false, nil - } - - // If fewer than ConsensusMinerMinMiners over threshold miner can win a block with non-zero power - return minerNominalPower.GreaterThan(abi.NewStoragePower(0)), nil -} - -func (st *State) GetClaim(s adt.Store, a addr.Address) (*Claim, bool, error) { - claims, err := adt.AsMap(s, st.Claims, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, false, xerrors.Errorf("failed to load claims: %w", err) - } - return getClaim(claims, a) -} - -func getClaim(claims *adt.Map, a addr.Address) (*Claim, bool, error) { - var out Claim - found, err := claims.Get(abi.AddrKey(a), &out) - if err != nil { - return nil, false, xerrors.Errorf("failed to get claim for address %v: %w", a, err) - } - if !found { - return nil, false, nil - } - return &out, true, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_types.go deleted file mode 100644 index b60ffae..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/power/power_types.go +++ /dev/null @@ -1,50 +0,0 @@ -package power - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" -) - -// Storage miner actor constructor params are defined here so the power actor can send them to the init actor -// to instantiate miners. -// Changed since v2: -// - Seal proof type replaced with PoSt proof type -type MinerConstructorParams struct { - OwnerAddr addr.Address - WorkerAddr addr.Address - ControlAddrs []addr.Address - WindowPoStProofType abi.RegisteredPoStProof - PeerId abi.PeerID - Multiaddrs []abi.Multiaddrs -} - -type CreateMinerParams struct { - Owner addr.Address - Worker addr.Address - WindowPoStProofType abi.RegisteredPoStProof - Peer abi.PeerID - Multiaddrs []abi.Multiaddrs -} - -type CreateMinerReturn struct { - IDAddress addr.Address // The canonical ID-based address for the actor. - RobustAddress addr.Address // A more expensive but re-org-safe address for the newly created actor. 
-} - -type UpdateClaimedPowerParams struct { - RawByteDelta abi.StoragePower - QualityAdjustedDelta abi.StoragePower -} - -type EnrollCronEventParams struct { - EventEpoch abi.ChainEpoch - Payload []byte -} - -type CurrentTotalPowerReturn struct { - RawBytePower abi.StoragePower - QualityAdjPower abi.StoragePower - PledgeCollateral abi.TokenAmount - QualityAdjPowerSmoothed smoothing.FilterEstimate -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/cbor_gen.go deleted file mode 100644 index bd4eeeb..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/cbor_gen.go +++ /dev/null @@ -1,68 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package system - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufState = []byte{129} - -func (t *State) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.BuiltinActors (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.BuiltinActors); err != nil { - return xerrors.Errorf("failed to write cid field t.BuiltinActors: %w", err) - } - - return nil -} - -func (t *State) UnmarshalCBOR(r io.Reader) error { - *t = State{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BuiltinActors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.BuiltinActors: %w", err) - } - - t.BuiltinActors = c - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/methods.go deleted file mode 100644 index c904705..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/methods.go +++ /dev/null @@ -1,9 +0,0 @@ -package system - -import ( - "github.com/filecoin-project/go-state-types/abi" -) - -var Methods = []interface{}{ - 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/system_actor_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/system_actor_state.go deleted file mode 100644 index e7bb467..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/system/system_actor_state.go +++ /dev/null @@ -1,25 +0,0 @@ -package system - -import ( - "context" - - "github.com/filecoin-project/go-state-types/manifest" - - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -type State struct { - BuiltinActors cid.Cid // ManifestData -} - -func ConstructState(store adt.Store) (*State, error) { - empty, err := store.Put(context.TODO(), &manifest.ManifestData{}) - if err != nil { - return nil, xerrors.Errorf("failed to create empty manifest: %w", err) - } - - return &State{BuiltinActors: empty}, nil -} diff --git 
a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/array.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/array.go deleted file mode 100644 index edcdf68..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/array.go +++ /dev/null @@ -1,152 +0,0 @@ -package adt - -import ( - "bytes" - - amt "github.com/filecoin-project/go-amt-ipld/v4" - - "github.com/filecoin-project/go-state-types/cbor" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" -) - -var DefaultAmtOptions = []amt.Option{} - -// Array stores a sparse sequence of values in an AMT. -type Array struct { - root *amt.Root - store Store -} - -// AsArray interprets a store as an AMT-based array with root `r`. -func AsArray(s Store, r cid.Cid, bitwidth int) (*Array, error) { - options := append(DefaultAmtOptions, amt.UseTreeBitWidth(uint(bitwidth))) - root, err := amt.LoadAMT(s.Context(), s, r, options...) - if err != nil { - return nil, xerrors.Errorf("failed to root: %w", err) - } - - return &Array{ - root: root, - store: s, - }, nil -} - -// Creates a new array backed by an empty AMT. -func MakeEmptyArray(s Store, bitwidth int) (*Array, error) { - options := append(DefaultAmtOptions, amt.UseTreeBitWidth(uint(bitwidth))) - root, err := amt.NewAMT(s, options...) - if err != nil { - return nil, err - } - return &Array{ - root: root, - store: s, - }, nil -} - -// Writes a new empty array to the store, returning its CID. -func StoreEmptyArray(s Store, bitwidth int) (cid.Cid, error) { - arr, err := MakeEmptyArray(s, bitwidth) - if err != nil { - return cid.Undef, err - } - return arr.Root() -} - -// Returns the root CID of the underlying AMT. -func (a *Array) Root() (cid.Cid, error) { - return a.root.Flush(a.store.Context()) -} - -// Appends a value to the end of the array. Assumes continuous array. -// If the array isn't continuous use Set and a separate counter -func (a *Array) AppendContinuous(value cbor.Marshaler) error { - if err := a.root.Set(a.store.Context(), a.root.Len(), value); err != nil { - return xerrors.Errorf("append failed to set index %v value %v in root %v: %w", a.root.Len(), value, a.root, err) - } - return nil -} - -func (a *Array) Set(i uint64, value cbor.Marshaler) error { - if err := a.root.Set(a.store.Context(), i, value); err != nil { - return xerrors.Errorf("failed to set index %v value %v in root %v: %w", i, value, a.root, err) - } - return nil -} - -// Removes the value at index `i` from the AMT, if it exists. -// Returns whether the index was previously present. -func (a *Array) TryDelete(i uint64) (bool, error) { - if found, err := a.root.Delete(a.store.Context(), i); err != nil { - return false, xerrors.Errorf("array delete failed to delete index %v in root %v: %w", i, a.root, err) - } else { - return found, nil - } -} - -// Removes the value at index `i` from the AMT, expecting it to exist. 
-func (a *Array) Delete(i uint64) error { - if found, err := a.root.Delete(a.store.Context(), i); err != nil { - return xerrors.Errorf("failed to delete index %v in root %v: %w", i, a.root, err) - } else if !found { - return xerrors.Errorf("no such index %v in root %v to delete: %w", i, a.root, err) - } - return nil -} - -func (a *Array) BatchDelete(ix []uint64, strict bool) error { - if _, err := a.root.BatchDelete(a.store.Context(), ix, strict); err != nil { - return xerrors.Errorf("failed to batch delete keys %v: %w", ix, err) - } - return nil -} - -// Iterates all entries in the array, deserializing each value in turn into `out` and then calling a function. -// Iteration halts if the function returns an error. -// If the output parameter is nil, deserialization is skipped. -func (a *Array) ForEach(out cbor.Unmarshaler, fn func(i int64) error) error { - return a.root.ForEach(a.store.Context(), func(k uint64, val *cbg.Deferred) error { - if out != nil { - if deferred, ok := out.(*cbg.Deferred); ok { - // fast-path deferred -> deferred to avoid re-decoding. - *deferred = *val - } else if err := out.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { - return err - } - } - return fn(int64(k)) - }) -} - -func (a *Array) Length() uint64 { - return a.root.Len() -} - -// Get retrieves array element into the 'out' unmarshaler, returning a boolean -// indicating whether the element was found in the array -func (a *Array) Get(k uint64, out cbor.Unmarshaler) (bool, error) { - if found, err := a.root.Get(a.store.Context(), k, out); err != nil { - return false, xerrors.Errorf("failed to get index %v in root %v: %w", k, a.root, err) - } else { - return found, nil - } -} - -// Retrieves an array value into the 'out' unmarshaler (if non-nil), and removes the entry. -// Returns a boolean indicating whether the element was previously in the array. -func (a *Array) Pop(k uint64, out cbor.Unmarshaler) (bool, error) { - if found, err := a.root.Get(a.store.Context(), k, out); err != nil { - return false, xerrors.Errorf("failed to get index %v in root %v: %w", k, a.root, err) - } else if !found { - return false, nil - } - - if found, err := a.root.Delete(a.store.Context(), k); err != nil { - return false, xerrors.Errorf("failed to delete index %v in root %v: %w", k, a.root, err) - } else if !found { - return false, xerrors.Errorf("can't find index %v to delete in root %v", k, a.root) - } - return true, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/balancetable.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/balancetable.go deleted file mode 100644 index f31e9c6..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/balancetable.go +++ /dev/null @@ -1,40 +0,0 @@ -package adt - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - cid "github.com/ipfs/go-cid" -) - -// Bitwidth of balance table HAMTs, determined empirically from mutation -// patterns and projections of mainnet data -const BalanceTableBitwidth = 6 - -// A specialization of a map of addresses to (positive) token amounts. -// Absent keys implicitly have a balance of zero. -type BalanceTable Map - -// Interprets a store as balance table with root `r`. 
-func AsBalanceTable(s Store, r cid.Cid) (*BalanceTable, error) { - m, err := AsMap(s, r, BalanceTableBitwidth) - if err != nil { - return nil, err - } - - return &BalanceTable{ - root: m.root, - store: s, - }, nil -} - -// Gets the balance for a key, which is zero if they key has never been added to. -func (t *BalanceTable) Get(key addr.Address) (abi.TokenAmount, error) { - var value abi.TokenAmount - found, err := (*Map)(t).Get(abi.AddrKey(key), &value) - if !found || err != nil { - value = big.Zero() - } - - return value, err -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/map.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/map.go deleted file mode 100644 index de8bcb7..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/map.go +++ /dev/null @@ -1,182 +0,0 @@ -package adt - -import ( - "bytes" - "crypto/sha256" - - hamt "github.com/filecoin-project/go-hamt-ipld/v3" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/cbor" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" -) - -// DefaultHamtOptions specifies default options used to construct Filecoin HAMTs. -// Specific HAMT instances may specify additional options, especially the bitwidth. -var DefaultHamtOptions = []hamt.Option{ - hamt.UseHashFunction(func(input []byte) []byte { - res := sha256.Sum256(input) - return res[:] - }), -} - -// Map stores key-value pairs in a HAMT. -type Map struct { - lastCid cid.Cid - root *hamt.Node - store Store -} - -// AsMap interprets a store as a HAMT-based map with root `r`. -// The HAMT is interpreted with branching factor 2^bitwidth. -// We could drop this parameter if https://github.com/filecoin-project/go-hamt-ipld/issues/79 is implemented. -func AsMap(s Store, root cid.Cid, bitwidth int) (*Map, error) { - options := append(DefaultHamtOptions, hamt.UseTreeBitWidth(bitwidth)) - nd, err := hamt.LoadNode(s.Context(), s, root, options...) - if err != nil { - return nil, xerrors.Errorf("failed to load hamt node: %w", err) - } - - return &Map{ - lastCid: root, - root: nd, - store: s, - }, nil -} - -// Creates a new map backed by an empty HAMT. -func MakeEmptyMap(s Store, bitwidth int) (*Map, error) { - options := append(DefaultHamtOptions, hamt.UseTreeBitWidth(bitwidth)) - nd, err := hamt.NewNode(s, options...) - if err != nil { - return nil, err - } - return &Map{ - lastCid: cid.Undef, - root: nd, - store: s, - }, nil -} - -// Creates and stores a new empty map, returning its CID. -func StoreEmptyMap(s Store, bitwidth int) (cid.Cid, error) { - m, err := MakeEmptyMap(s, bitwidth) - if err != nil { - return cid.Undef, err - } - return m.Root() -} - -// Returns the root cid of underlying HAMT. -func (m *Map) Root() (cid.Cid, error) { - if err := m.root.Flush(m.store.Context()); err != nil { - return cid.Undef, xerrors.Errorf("failed to flush map root: %w", err) - } - - c, err := m.store.Put(m.store.Context(), m.root) - if err != nil { - return cid.Undef, xerrors.Errorf("writing map root object: %w", err) - } - m.lastCid = c - - return c, nil -} - -// Put adds value `v` with key `k` to the hamt store. 
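The Map wrapper above, together with the Put/Get/Delete methods that follow, is the pattern every HAMT-backed actor collection in this tree uses: make or load a map, mutate it, then flush it back to a root CID. A minimal illustrative sketch (not taken from the vendored code), assuming the upstream go-state-types module still exposes these packages at the paths shown in this diff; storeClaim and its parameters are hypothetical names, and power.Claim is used only because its generated CBOR methods satisfy cbor.Marshaler:

package example

import (
    addr "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin"
    "github.com/filecoin-project/go-state-types/builtin/v8/power"
    "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
    "github.com/ipfs/go-cid"
)

// storeClaim writes a single claim into a fresh HAMT keyed by miner address
// and returns the flushed root CID.
func storeClaim(s adt.Store, miner addr.Address, claim *power.Claim) (cid.Cid, error) {
    m, err := adt.MakeEmptyMap(s, builtin.DefaultHamtBitwidth) // empty in-memory HAMT node
    if err != nil {
        return cid.Undef, err
    }
    if err := m.Put(abi.AddrKey(miner), claim); err != nil { // CBOR-marshal the value under the address key
        return cid.Undef, err
    }
    return m.Root() // flush the node to the store and return its CID
}

Loading it back is the mirror image: adt.AsMap with the returned root and the same bitwidth, then Get with the same AddrKey.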
-func (m *Map) Put(k abi.Keyer, v cbor.Marshaler) error { - if err := m.root.Set(m.store.Context(), k.Key(), v); err != nil { - return xerrors.Errorf("failed to set key %v value %v in node %v: %w", k.Key(), v, m.lastCid, err) - } - return nil -} - -// Get retrieves the value at `k` into `out`, if the `k` is present and `out` is non-nil. -// Returns whether the key was found. -func (m *Map) Get(k abi.Keyer, out cbor.Unmarshaler) (bool, error) { - if found, err := m.root.Find(m.store.Context(), k.Key(), out); err != nil { - return false, xerrors.Errorf("failed to get key %v in node %v: %w", m.lastCid, k.Key(), err) - } else { - return found, nil - } -} - -// Has checks for the existence of a key without deserializing its value. -func (m *Map) Has(k abi.Keyer) (bool, error) { - if found, err := m.root.Find(m.store.Context(), k.Key(), nil); err != nil { - return false, xerrors.Errorf("failed to check key %v in node %v: %w", m.lastCid, k.Key(), err) - } else { - return found, nil - } -} - -// Sets key key `k` to value `v` iff the key is not already present. -func (m *Map) PutIfAbsent(k abi.Keyer, v cbor.Marshaler) (bool, error) { - if modified, err := m.root.SetIfAbsent(m.store.Context(), k.Key(), v); err != nil { - return false, xerrors.Errorf("failed to set key %v value %v in node %v: %w", k.Key(), v, m.lastCid, err) - } else { - return modified, nil - } -} - -// Removes the value at `k` from the hamt store, if it exists. -// Returns whether the key was previously present. -func (m *Map) TryDelete(k abi.Keyer) (bool, error) { - if found, err := m.root.Delete(m.store.Context(), k.Key()); err != nil { - return false, xerrors.Errorf("failed to delete key %v in node %v: %v", k.Key(), m.root, err) - } else { - return found, nil - } -} - -// Removes the value at `k` from the hamt store, expecting it to exist. -func (m *Map) Delete(k abi.Keyer) error { - if found, err := m.root.Delete(m.store.Context(), k.Key()); err != nil { - return xerrors.Errorf("failed to delete key %v in node %v: %v", k.Key(), m.root, err) - } else if !found { - return xerrors.Errorf("no such key %v to delete in node %v", k.Key(), m.root) - } - return nil -} - -// Iterates all entries in the map, deserializing each value in turn into `out` and then -// calling a function with the corresponding key. -// Iteration halts if the function returns an error. -// If the output parameter is nil, deserialization is skipped. -func (m *Map) ForEach(out cbor.Unmarshaler, fn func(key string) error) error { - return m.root.ForEach(m.store.Context(), func(k string, val *cbg.Deferred) error { - if out != nil { - // Why doesn't hamt.ForEach() just return the value as bytes? - err := out.UnmarshalCBOR(bytes.NewReader(val.Raw)) - if err != nil { - return err - } - } - return fn(k) - }) -} - -// Collects all the keys from the map into a slice of strings. -func (m *Map) CollectKeys() (out []string, err error) { - err = m.ForEach(nil, func(key string) error { - out = append(out, key) - return nil - }) - return -} - -// Retrieves the value for `k` into the 'out' unmarshaler (if non-nil), and removes the entry. -// Returns a boolean indicating whether the element was previously in the map. 
-func (m *Map) Pop(k abi.Keyer, out cbor.Unmarshaler) (bool, error) { - key := k.Key() - if found, err := m.root.Find(m.store.Context(), key, out); err != nil || !found { - return found, err - } - - if found, err := m.root.Delete(m.store.Context(), key); err != nil { - return false, err - } else if !found { - return false, xerrors.Errorf("failed to find key %v to delete", k.Key()) - } - return true, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/multimap.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/multimap.go deleted file mode 100644 index 72d7452..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/multimap.go +++ /dev/null @@ -1,34 +0,0 @@ -package adt - -import "github.com/ipfs/go-cid" - -// Multimap stores multiple values per key in a HAMT of AMTs. -// The order of insertion of values for each key is retained. -type Multimap struct { - mp *Map - innerBitwidth int -} - -// Creates a new map backed by an empty HAMT and flushes it to the store. -// The outer map has a branching factor of 2^bitwidth. -func MakeEmptyMultimap(s Store, outerBitwidth, innerBitwidth int) (*Multimap, error) { - m, err := MakeEmptyMap(s, outerBitwidth) - if err != nil { - return nil, err - } - return &Multimap{m, innerBitwidth}, nil -} - -// Creates and stores a new empty multimap, returning its CID. -func StoreEmptyMultimap(store Store, outerBitwidth, innerBitwidth int) (cid.Cid, error) { - mmap, err := MakeEmptyMultimap(store, outerBitwidth, innerBitwidth) - if err != nil { - return cid.Undef, err - } - return mmap.Root() -} - -// Returns the root cid of the underlying HAMT. -func (mm *Multimap) Root() (cid.Cid, error) { - return mm.mp.Root() -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/store.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/store.go deleted file mode 100644 index 004b409..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/adt/store.go +++ /dev/null @@ -1,32 +0,0 @@ -package adt - -import ( - "context" - - ipldcbor "github.com/ipfs/go-ipld-cbor" -) - -// Store defines an interface required to back the ADTs in this package. -type Store interface { - Context() context.Context - ipldcbor.IpldStore -} - -// Adapts a vanilla IPLD store as an ADT store. 
-func WrapStore(ctx context.Context, store ipldcbor.IpldStore) Store { - return &wstore{ - ctx: ctx, - IpldStore: store, - } -} - -type wstore struct { - ctx context.Context - ipldcbor.IpldStore -} - -var _ Store = &wstore{} - -func (s *wstore) Context() context.Context { - return s.ctx -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/expneg.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/expneg.go deleted file mode 100644 index 6432230..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/expneg.go +++ /dev/null @@ -1,61 +0,0 @@ -package math - -import ( - "math/big" -) - -var ( - // Coefficents in Q.128 format - expNumCoef []*big.Int - expDenoCoef []*big.Int -) - -func init() { - - // parameters are in integer format, - // coefficients are *2^-128 of that - // so we can just load them if we treat them as Q.128 - num := []string{ - "-648770010757830093818553637600", - "67469480939593786226847644286976", - "-3197587544499098424029388939001856", - "89244641121992890118377641805348864", - "-1579656163641440567800982336819953664", - "17685496037279256458459817590917169152", - "-115682590513835356866803355398940131328", - "340282366920938463463374607431768211456", - } - expNumCoef = Parse(num) - - deno := []string{ - "1225524182432722209606361", - "114095592300906098243859450", - "5665570424063336070530214243", - "194450132448609991765137938448", - "5068267641632683791026134915072", - "104716890604972796896895427629056", - "1748338658439454459487681798864896", - "23704654329841312470660182937960448", - "259380097567996910282699886670381056", - "2250336698853390384720606936038375424", - "14978272436876548034486263159246028800", - "72144088983913131323343765784380833792", - "224599776407103106596571252037123047424", - "340282366920938463463374607431768211456", - } - expDenoCoef = Parse(deno) -} - -// ExpNeg accepts x in Q.128 format and computes e^-x. -// It is most precise within [0, 1.725) range, where error is less than 3.4e-30. -// Over the [0, 5) range its error is less than 4.6e-15. -// Output is in Q.128 format. 
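ExpNeg's Q.128 convention is easy to get wrong, so a quick sanity check is useful: with the coefficient tables above, x = 0 collapses both Horner evaluations to their final coefficient, 2^128, so e^-0 comes back as exactly 1.0 in Q.128. An illustrative sketch (not part of the vendored code), assuming the util/math package is importable from the upstream module:

package example

import (
    "fmt"
    "math/big"

    qmath "github.com/filecoin-project/go-state-types/builtin/v8/util/math"
)

func checkExpNegAtZero() {
    one := new(big.Int).Lsh(big.NewInt(1), qmath.Precision128) // 1.0 in Q.128
    got := qmath.ExpNeg(big.NewInt(0))                         // e^-0, also in Q.128
    fmt.Println(got.Cmp(one) == 0)                             // true: exactly 1.0
}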
-func ExpNeg(x *big.Int) *big.Int { - // exp is approximated by rational function - // polynomials of the rational function are evaluated using Horner's method - num := Polyval(expNumCoef, x) // Q.128 - deno := Polyval(expDenoCoef, x) // Q.128 - - num = num.Lsh(num, Precision128) // Q.256 - return num.Div(num, deno) // Q.256 / Q.128 => Q.128 -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/ln.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/ln.go deleted file mode 100644 index 4d21b23..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/ln.go +++ /dev/null @@ -1,78 +0,0 @@ -package math - -import ( - gbig "math/big" - - "github.com/filecoin-project/go-state-types/big" -) - -var ( - // Coefficients in Q.128 format - lnNumCoef []*gbig.Int - lnDenomCoef []*gbig.Int - ln2 big.Int -) - -func init() { - // ln approximation coefficients - // parameters are in integer format, - // coefficients are *2^-128 of that - // so we can just load them if we treat them as Q.128 - num := []string{ - "261417938209272870992496419296200268025", - "7266615505142943436908456158054846846897", - "32458783941900493142649393804518050491988", - "17078670566130897220338060387082146864806", - "-35150353308172866634071793531642638290419", - "-20351202052858059355702509232125230498980", - "-1563932590352680681114104005183375350999", - } - lnNumCoef = Parse(num) - - denom := []string{ - "49928077726659937662124949977867279384", - "2508163877009111928787629628566491583994", - "21757751789594546643737445330202599887121", - "53400635271583923415775576342898617051826", - "41248834748603606604000911015235164348839", - "9015227820322455780436733526367238305537", - "340282366920938463463374607431768211456", - } - lnDenomCoef = Parse(denom) - - constStrs := []string{ - "235865763225513294137944142764154484399", // ln(2) - } - constBigs := Parse(constStrs) - ln2 = big.NewFromGo(constBigs[0]) -} - -// The natural log of Q.128 x. -func Ln(z big.Int) big.Int { - // bitlen - 1 - precision - k := int64(z.BitLen()) - 1 - Precision128 // Q.0 - x := big.Zero() // nolint:ineffassign - - if k > 0 { - x = big.Rsh(z, uint(k)) // Q.128 - } else { - x = big.Lsh(z, uint(-k)) // Q.128 - } - - // ln(z) = ln(x * 2^k) = ln(x) + k * ln2 - lnz := big.Mul(big.NewInt(k), ln2) // Q.0 * Q.128 => Q.128 - return big.Sum(lnz, lnBetweenOneAndTwo(x)) // Q.128 -} - -// The natural log of x, specified in Q.128 format -// Should only use with 1 <= x <= 2 -// Output is in Q.128 format. 
-func lnBetweenOneAndTwo(x big.Int) big.Int { - // ln is approximated by rational function - // polynomials of the rational function are evaluated using Horner's method - num := Polyval(lnNumCoef, x.Int) // Q.128 - denom := Polyval(lnDenomCoef, x.Int) // Q.128 - - num = num.Lsh(num, Precision128) // Q.128 => Q.256 - return big.NewFromGo(num.Div(num, denom)) // Q.256 / Q.128 => Q.128 -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/parse.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/parse.go deleted file mode 100644 index aee6c16..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/parse.go +++ /dev/null @@ -1,18 +0,0 @@ -package math - -import "math/big" - -// Parse a slice of strings (representing integers in decimal) -// Convention: this function is to be applied to strings representing Q.128 fixed-point numbers, and thus returns numbers in binary Q.128 representation -func Parse(coefs []string) []*big.Int { - out := make([]*big.Int, len(coefs)) - for i, coef := range coefs { - c, ok := new(big.Int).SetString(coef, 10) - if !ok { - panic("could not parse q128 parameter") - } - // << 128 (Q.0 to Q.128) >> 128 to transform integer params to coefficients - out[i] = c - } - return out -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/polyval.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/polyval.go deleted file mode 100644 index 81412d6..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/math/polyval.go +++ /dev/null @@ -1,22 +0,0 @@ -package math - -import "math/big" - -// note: all coefficients for which Polyval is used would need to be updated if this precision changes -const Precision128 = 128 - -// polyval evaluates a polynomial given by coefficients `p` in Q.128 format -// at point `x` in Q.128 format. Output is in Q.128. -// Coefficients should be ordered from the highest order coefficient to the lowest. 
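A handy property for checking Polyval follows from the Horner loop below: evaluating at x = 1.0 (2^128 in Q.128) leaves the running value unchanged by each multiply-and-shift, so the result is simply the sum of the coefficients. An illustrative sketch, again assuming the util/math package is importable from the upstream module:

package example

import (
    "fmt"
    "math/big"

    qmath "github.com/filecoin-project/go-state-types/builtin/v8/util/math"
)

func polyvalAtOne() {
    toQ128 := func(i int64) *big.Int {
        return new(big.Int).Lsh(big.NewInt(i), qmath.Precision128) // Q.0 => Q.128
    }
    // p(x) = 3x^2 + 2x + 5, coefficients ordered highest first, all in Q.128.
    coeffs := []*big.Int{toQ128(3), toQ128(2), toQ128(5)}
    got := qmath.Polyval(coeffs, toQ128(1))                // p(1.0) in Q.128
    fmt.Println(new(big.Int).Rsh(got, qmath.Precision128)) // 10
}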
-func Polyval(p []*big.Int, x *big.Int) *big.Int { - // evaluation using Horner's method - res := new(big.Int).Set(p[0]) // Q.128 - tmp := new(big.Int) // big.Int.Mul doesn't like when input is reused as output - for _, c := range p[1:] { - tmp = tmp.Mul(res, x) // Q.128 * Q.128 => Q.256 - res = res.Rsh(tmp, Precision128) // Q.256 >> 128 => Q.128 - res = res.Add(res, c) - } - - return res -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing/alpha_beta_filter.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing/alpha_beta_filter.go deleted file mode 100644 index b197abd..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing/alpha_beta_filter.go +++ /dev/null @@ -1,101 +0,0 @@ -package smoothing - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v8/util/math" -) - -var ( - DefaultAlpha big.Int // Q.128 value of 9.25e-4 - DefaultBeta big.Int // Q.128 value of 2.84e-7 - - ExtrapolatedCumSumRatioEpsilon big.Int // Q.128 value of 2^-50 -) - -func init() { - // Alpha Beta Filter constants - constStrs := []string{ - "314760000000000000000000000000000000", // DefaultAlpha - "96640100000000000000000000000000", // DefaultBeta - "302231454903657293676544", // Epsilon - - } - constBigs := math.Parse(constStrs) - DefaultAlpha = big.NewFromGo(constBigs[0]) - DefaultBeta = big.NewFromGo(constBigs[1]) - ExtrapolatedCumSumRatioEpsilon = big.NewFromGo(constBigs[2]) - -} - -//Alpha Beta Filter "position" (value) and "velocity" (rate of change of value) estimates -//Estimates are in Q.128 format -type FilterEstimate struct { - PositionEstimate big.Int // Q.128 - VelocityEstimate big.Int // Q.128 -} - -// Returns the Q.0 position estimate of the filter -func Estimate(fe *FilterEstimate) big.Int { - return big.Rsh(fe.PositionEstimate, math.Precision128) // Q.128 => Q.0 -} - -// Create a new filter estimate given two Q.0 format ints. -func NewEstimate(position, velocity big.Int) FilterEstimate { - return FilterEstimate{ - PositionEstimate: big.Lsh(position, math.Precision128), // Q.0 => Q.128 - VelocityEstimate: big.Lsh(velocity, math.Precision128), // Q.0 => Q.128 - } -} - -// Extrapolate the CumSumRatio given two filters. 
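NewEstimate and Estimate convert between plain Q.0 integers and the Q.128 form stored in FilterEstimate, while ExtrapolatedCumSumOfRatio (which follows) consumes the Q.128 form directly. A small round-trip sketch, assuming the smoothing and big packages are importable from the upstream module; the numbers are arbitrary:

package example

import (
    "fmt"

    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing"
)

func estimateRoundTrip() {
    // Position 750 and velocity 5 are ordinary (Q.0) integers.
    fe := smoothing.NewEstimate(big.NewInt(750), big.NewInt(5)) // stored internally as Q.128
    fmt.Println(smoothing.Estimate(&fe))                        // 750: Q.128 shifted back down to Q.0
}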
-// Output is in Q.128 format -func ExtrapolatedCumSumOfRatio(delta abi.ChainEpoch, relativeStart abi.ChainEpoch, estimateNum, estimateDenom FilterEstimate) big.Int { - deltaT := big.Lsh(big.NewInt(int64(delta)), math.Precision128) // Q.0 => Q.128 - t0 := big.Lsh(big.NewInt(int64(relativeStart)), math.Precision128) // Q.0 => Q.128 - // Renaming for ease of following spec and clarity - position1 := estimateNum.PositionEstimate - position2 := estimateDenom.PositionEstimate - velocity1 := estimateNum.VelocityEstimate - velocity2 := estimateDenom.VelocityEstimate - - squaredVelocity2 := big.Mul(velocity2, velocity2) // Q.128 * Q.128 => Q.256 - squaredVelocity2 = big.Rsh(squaredVelocity2, math.Precision128) // Q.256 => Q.128 - - if squaredVelocity2.GreaterThan(ExtrapolatedCumSumRatioEpsilon) { - x2a := big.Mul(t0, velocity2) // Q.128 * Q.128 => Q.256 - x2a = big.Rsh(x2a, math.Precision128) // Q.256 => Q.128 - x2a = big.Sum(position2, x2a) - - x2b := big.Mul(deltaT, velocity2) // Q.128 * Q.128 => Q.256 - x2b = big.Rsh(x2b, math.Precision128) // Q.256 => Q.128 - x2b = big.Sum(x2a, x2b) - - x2a = math.Ln(x2a) // Q.128 - x2b = math.Ln(x2b) // Q.128 - - m1 := big.Sub(x2b, x2a) - m1 = big.Mul(velocity2, big.Mul(position1, m1)) // Q.128 * Q.128 * Q.128 => Q.384 - m1 = big.Rsh(m1, math.Precision128) //Q.384 => Q.256 - - m2L := big.Sub(x2a, x2b) - m2L = big.Mul(position2, m2L) // Q.128 * Q.128 => Q.256 - m2R := big.Mul(velocity2, deltaT) // Q.128 * Q.128 => Q.256 - m2 := big.Sum(m2L, m2R) - m2 = big.Mul(velocity1, m2) // Q.256 => Q.384 - m2 = big.Rsh(m2, math.Precision128) //Q.384 => Q.256 - - return big.Div(big.Sum(m1, m2), squaredVelocity2) // Q.256 / Q.128 => Q.128 - - } - - halfDeltaT := big.Rsh(deltaT, 1) // Q.128 / Q.0 => Q.128 - x1m := big.Mul(velocity1, big.Sum(t0, halfDeltaT)) // Q.128 * Q.128 => Q.256 - x1m = big.Rsh(x1m, math.Precision128) // Q.256 => Q.128 - x1m = big.Add(position1, x1m) - - cumsumRatio := big.Mul(x1m, deltaT) // Q.128 * Q.128 => Q.256 - cumsumRatio = big.Div(cumsumRatio, position2) // Q.256 / Q.128 => Q.128 - return cumsumRatio - -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing/cbor_gen.go deleted file mode 100644 index c7742ef..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing/cbor_gen.go +++ /dev/null @@ -1,75 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package smoothing - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufFilterEstimate = []byte{130} - -func (t *FilterEstimate) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufFilterEstimate); err != nil { - return err - } - - // t.PositionEstimate (big.Int) (struct) - if err := t.PositionEstimate.MarshalCBOR(w); err != nil { - return err - } - - // t.VelocityEstimate (big.Int) (struct) - if err := t.VelocityEstimate.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *FilterEstimate) UnmarshalCBOR(r io.Reader) error { - *t = FilterEstimate{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PositionEstimate (big.Int) (struct) - - { - - if err := t.PositionEstimate.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PositionEstimate: %w", err) - } - - } - // t.VelocityEstimate (big.Int) (struct) - - { - - if err := t.VelocityEstimate.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VelocityEstimate: %w", err) - } - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/cbor_gen.go deleted file mode 100644 index 807999d..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/cbor_gen.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package migration - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufActor = []byte{132} - -func (t *Actor) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufActor); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Code (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Code); err != nil { - return xerrors.Errorf("failed to write cid field t.Code: %w", err) - } - - // t.Head (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Head); err != nil { - return xerrors.Errorf("failed to write cid field t.Head: %w", err) - } - - // t.CallSeqNum (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CallSeqNum)); err != nil { - return err - } - - // t.Balance (big.Int) (struct) - if err := t.Balance.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Actor) UnmarshalCBOR(r io.Reader) error { - *t = Actor{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Code (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Code: %w", err) - } - - t.Code = c - - } - // t.Head (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Head: %w", err) - } - - t.Head = c - - } - // t.CallSeqNum (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.CallSeqNum = uint64(extra) - - } - // t.Balance (big.Int) (struct) - - { - - if err := t.Balance.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Balance: %w", err) - } - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/miner.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/miner.go deleted file mode 100644 index 7855dee..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/miner.go +++ /dev/null @@ -1,168 +0,0 @@ -package migration - -import ( - "context" - - "github.com/filecoin-project/go-state-types/builtin/v8/market" - - "golang.org/x/xerrors" - - commp "github.com/filecoin-project/go-commp-utils/nonffi" - "github.com/filecoin-project/go-state-types/builtin" - miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/go-state-types/abi" -) - -type minerMigrator struct { - proposals *market.DealArray - OutCodeCID cid.Cid -} - -func (m minerMigrator) migratedCodeCID() cid.Cid { - return m.OutCodeCID -} - -func (m minerMigrator) migrateState(ctx context.Context, store cbor.IpldStore, in actorMigrationInput) (*actorMigrationResult, error) { - var inState miner8.State - if err := store.Get(ctx, in.head, &inState); err != nil { - return 
nil, err - } - var inInfo miner8.MinerInfo - if err := store.Get(ctx, inState.Info, &inInfo); err != nil { - return nil, err - } - wrappedStore := adt.WrapStore(ctx, store) - - oldPrecommitOnChainInfos, err := adt.AsMap(wrappedStore, inState.PreCommittedSectors, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to load old precommit onchain infos for miner %s: %w", in.address, err) - } - - emptyMap, err := adt.StoreEmptyMap(wrappedStore, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to make empty map: %w", err) - } - - newPrecommitOnChainInfos, err := adt.AsMap(wrappedStore, emptyMap, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to load empty map: %w", err) - } - - var info miner8.SectorPreCommitOnChainInfo - err = oldPrecommitOnChainInfos.ForEach(&info, func(key string) error { - var unsealedCid *cid.Cid - if len(info.Info.DealIDs) != 0 { - pieces := make([]abi.PieceInfo, len(info.Info.DealIDs)) - for i, dealID := range info.Info.DealIDs { - deal, err := m.proposals.GetDealProposal(dealID) - if err != nil { - return xerrors.Errorf("error getting deal proposal: %w", err) - } - - pieces[i] = abi.PieceInfo{ - PieceCID: deal.PieceCID, - Size: deal.PieceSize, - } - } - - commd, err := commp.GenerateUnsealedCID(info.Info.SealProof, pieces) - if err != nil { - return xerrors.Errorf("failed to generate unsealed CID: %w", err) - } - - unsealedCid = &commd - } - - err = newPrecommitOnChainInfos.Put(miner9.SectorKey(info.Info.SectorNumber), &miner9.SectorPreCommitOnChainInfo{ - Info: miner9.SectorPreCommitInfo{ - SealProof: info.Info.SealProof, - SectorNumber: info.Info.SectorNumber, - SealedCID: info.Info.SealedCID, - SealRandEpoch: info.Info.SealRandEpoch, - DealIDs: info.Info.DealIDs, - Expiration: info.Info.Expiration, - UnsealedCid: unsealedCid, - }, - PreCommitDeposit: info.PreCommitDeposit, - PreCommitEpoch: info.PreCommitEpoch, - }) - - if err != nil { - return xerrors.Errorf("failed to write new precommitinfo: %w", err) - } - - return nil - }) - - if err != nil { - return nil, xerrors.Errorf("failed to iterate over precommitinfos: %w", err) - } - - newPrecommits, err := newPrecommitOnChainInfos.Root() - if err != nil { - return nil, xerrors.Errorf("failed to flush new precommits: %w", err) - } - - var newPendingWorkerKey *miner9.WorkerKeyChange - if inInfo.PendingWorkerKey != nil { - newPendingWorkerKey = &miner9.WorkerKeyChange{ - NewWorker: inInfo.PendingWorkerKey.NewWorker, - EffectiveAt: inInfo.PendingWorkerKey.EffectiveAt, - } - } - - outInfo := miner9.MinerInfo{ - Owner: inInfo.Owner, - Worker: inInfo.Worker, - Beneficiary: inInfo.Owner, - BeneficiaryTerm: miner9.BeneficiaryTerm{ - Quota: abi.NewTokenAmount(0), - UsedQuota: abi.NewTokenAmount(0), - Expiration: 0, - }, - PendingBeneficiaryTerm: nil, - ControlAddresses: inInfo.ControlAddresses, - PendingWorkerKey: newPendingWorkerKey, - PeerId: inInfo.PeerId, - Multiaddrs: inInfo.Multiaddrs, - WindowPoStProofType: inInfo.WindowPoStProofType, - SectorSize: inInfo.SectorSize, - WindowPoStPartitionSectors: inInfo.WindowPoStPartitionSectors, - ConsensusFaultElapsed: inInfo.ConsensusFaultElapsed, - PendingOwnerAddress: inInfo.PendingOwnerAddress, - } - newInfoCid, err := store.Put(ctx, &outInfo) - if err != nil { - return nil, xerrors.Errorf("failed to flush new miner info: %w", err) - } - - outState := miner9.State{ - Info: newInfoCid, - PreCommitDeposits: inState.PreCommitDeposits, - LockedFunds: inState.LockedFunds, - 
VestingFunds: inState.VestingFunds, - FeeDebt: inState.FeeDebt, - InitialPledge: inState.InitialPledge, - PreCommittedSectors: newPrecommits, - PreCommittedSectorsCleanUp: inState.PreCommittedSectorsCleanUp, - AllocatedSectors: inState.AllocatedSectors, - Sectors: inState.Sectors, - ProvingPeriodStart: inState.ProvingPeriodStart, - CurrentDeadline: inState.CurrentDeadline, - Deadlines: inState.Deadlines, - EarlyTerminations: inState.EarlyTerminations, - DeadlineCronActive: inState.DeadlineCronActive, - } - - newHead, err := store.Put(ctx, &outState) - return &actorMigrationResult{ - newCodeCID: m.migratedCodeCID(), - newHead: newHead, - }, err -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/system.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/system.go deleted file mode 100644 index 3571357..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/system.go +++ /dev/null @@ -1,30 +0,0 @@ -package migration - -import ( - "context" - - "github.com/filecoin-project/go-state-types/builtin/v8/system" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" -) - -// System Actor migrator -type systemActorMigrator struct { - OutCodeCID cid.Cid - ManifestData cid.Cid -} - -func (m systemActorMigrator) migrateState(ctx context.Context, store cbor.IpldStore, in actorMigrationInput) (*actorMigrationResult, error) { - // The ManifestData itself is already in the blockstore - state := system.State{BuiltinActors: m.ManifestData} - stateHead, err := store.Put(ctx, &state) - if err != nil { - return nil, err - } - - return &actorMigrationResult{ - newCodeCID: m.OutCodeCID, - newHead: stateHead, - }, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/top.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/top.go deleted file mode 100644 index df339f8..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/top.go +++ /dev/null @@ -1,378 +0,0 @@ -package migration - -import ( - "context" - "sync" - "sync/atomic" - "time" - - market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" - - "github.com/filecoin-project/go-state-types/builtin/v8/system" - - "github.com/filecoin-project/go-state-types/builtin" - - "github.com/multiformats/go-multibase" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - "github.com/filecoin-project/go-state-types/manifest" - "github.com/filecoin-project/go-state-types/rt" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/sync/errgroup" - "golang.org/x/xerrors" -) - -// Config parameterizes a state tree migration -type Config struct { - // Number of migration worker goroutines to run. - // More workers enables higher CPU utilization doing migration computations (including state encoding) - MaxWorkers uint - // Capacity of the queue of jobs available to workers (zero for unbuffered). - // A queue length of hundreds to thousands improves throughput at the cost of memory. - JobQueueSize uint - // Capacity of the queue receiving migration results from workers, for persisting (zero for unbuffered). - // A queue length of tens to hundreds improves throughput at the cost of memory. - ResultQueueSize uint - // Time between progress logs to emit. - // Zero (the default) results in no progress logs. 
- ProgressLogPeriod time.Duration -} - -type Logger interface { - // This is the same logging interface provided by the Runtime - Log(level rt.LogLevel, msg string, args ...interface{}) -} - -// MigrationCache stores and loads cached data. Its implementation must be threadsafe -type MigrationCache interface { - Write(key string, newCid cid.Cid) error - Read(key string) (bool, cid.Cid, error) - Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error) -} - -func ActorHeadKey(addr address.Address, head cid.Cid) string { - headKey, err := head.StringOfBase(multibase.Base32) - if err != nil { - panic(err) - } - - return addr.String() + "-head-" + headKey -} - -// Migrates the filecoin state tree starting from the global state tree and upgrading all actor state. -// The store must support concurrent writes (even if the configured worker count is 1). -func MigrateStateTree(ctx context.Context, store cbor.IpldStore, newManifestCID cid.Cid, actorsRootIn cid.Cid, priorEpoch abi.ChainEpoch, cfg Config, log Logger, cache MigrationCache) (cid.Cid, error) { - if cfg.MaxWorkers <= 0 { - return cid.Undef, xerrors.Errorf("invalid migration config with %d workers", cfg.MaxWorkers) - } - - adtStore := adt8.WrapStore(ctx, store) - - // Load input and output state trees - actorsIn, err := LoadTree(adtStore, actorsRootIn) - if err != nil { - return cid.Undef, err - } - actorsOut, err := NewTree(adtStore) - if err != nil { - return cid.Undef, err - } - - // load old manifest data - systemActor, ok, err := actorsIn.GetActor(builtin.SystemActorAddr) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err) - } - - if !ok { - return cid.Undef, xerrors.New("didn't find system actor") - } - - var systemState system.State - if err := store.Get(ctx, systemActor.Head, &systemState); err != nil { - return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err) - } - - var oldManifestData manifest.ManifestData - if err := store.Get(ctx, systemState.BuiltinActors, &oldManifestData); err != nil { - return cid.Undef, xerrors.Errorf("failed to get old manifest data: %w", err) - } - - // load new manifest - var newManifest manifest.Manifest - if err := adtStore.Get(ctx, newManifestCID, &newManifest); err != nil { - return cid.Undef, xerrors.Errorf("error reading actor manifest: %w", err) - } - - if err := newManifest.Load(ctx, adtStore); err != nil { - return cid.Undef, xerrors.Errorf("error loading actor manifest: %w", err) - } - - // Maps prior version code CIDs to migration functions. - migrations := make(map[cid.Cid]actorMigration) - - // Set of prior version code CIDs for actors to defer during iteration, for explicit migration afterwards. 
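Config, Logger, and MigrationCache come together in MigrateStateTree above. A minimal invocation sketch (not taken from the vendored code); the store, CIDs, epoch, and logger are placeholders supplied by the caller, and the numeric Config values are illustrative rather than recommended settings:

package example

import (
    "context"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    migration9 "github.com/filecoin-project/go-state-types/builtin/v9/migration"
    "github.com/ipfs/go-cid"
    cbor "github.com/ipfs/go-ipld-cbor"
)

func runMigration(ctx context.Context, store cbor.IpldStore, newManifest, stateRoot cid.Cid,
    epoch abi.ChainEpoch, log migration9.Logger) (cid.Cid, error) {
    cfg := migration9.Config{
        MaxWorkers:        8,                // goroutines doing per-actor migration work
        JobQueueSize:      1000,             // buffered jobs waiting for workers
        ResultQueueSize:   100,              // buffered results waiting to be persisted
        ProgressLogPeriod: 30 * time.Second, // periodic progress lines via log
    }
    cache := migration9.NewMemMigrationCache() // threadsafe in-memory cache, defined later in this diff
    return migration9.MigrateStateTree(ctx, store, newManifest, stateRoot, epoch, cfg, log, cache)
}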
- var deferredCodeIDs = map[cid.Cid]struct{}{ - // None - } - - // simple code migrations - simpleMigrations := make(map[string]cid.Cid, len(oldManifestData.Entries)) - - miner8Cid := cid.Undef - for _, entry := range oldManifestData.Entries { - simpleMigrations[entry.Name] = entry.Code - if entry.Name == "storageminer" { - miner8Cid = entry.Code - } - } - - if miner8Cid == cid.Undef { - return cid.Undef, xerrors.Errorf("didn't find miner in old manifest entries") - } - - for name, oldCodeCID := range simpleMigrations { //nolint:nomaprange - newCodeCID, ok := newManifest.Get(name) - if !ok { - return cid.Undef, xerrors.Errorf("code cid for %s actor not found in new manifest", name) - } - - migrations[oldCodeCID] = codeMigrator{newCodeCID} - } - - // migrations that migrate both code and state - newSystemCodeCID, ok := newManifest.Get("system") - if !ok { - return cid.Undef, xerrors.Errorf("code cid for system actor not found in manifest") - } - - miner9Cid, ok := newManifest.Get("storageminer") - if !ok { - return cid.Undef, xerrors.Errorf("code cid for miner actor not found in new manifest") - } - - migrations[systemActor.Code] = systemActorMigrator{newSystemCodeCID, newManifest.Data} - - // load market proposals - marketActor, ok, err := actorsIn.GetActor(builtin.StorageMarketActorAddr) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to get market actor: %w", err) - } - - if !ok { - return cid.Undef, xerrors.New("didn't find market actor") - } - - var marketState market8.State - if err := store.Get(ctx, marketActor.Head, &marketState); err != nil { - return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err) - } - - proposals, err := market8.AsDealProposalArray(adtStore, marketState.Proposals) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to get proposals: %w", err) - } - - migrations[miner8Cid] = minerMigrator{proposals, miner9Cid} - - if len(migrations)+len(deferredCodeIDs) != len(oldManifestData.Entries) { - return cid.Undef, xerrors.Errorf("incomplete migration specification with %d code CIDs, need %d", len(migrations), len(oldManifestData.Entries)) - } - startTime := time.Now() - - // Setup synchronization - grp, ctx := errgroup.WithContext(ctx) - // Input and output queues for workers. - jobCh := make(chan *migrationJob, cfg.JobQueueSize) - jobResultCh := make(chan *migrationJobResult, cfg.ResultQueueSize) - // Atomically-modified counters for logging progress - var jobCount uint32 - var doneCount uint32 - - // Iterate all actors in old state root to create migration jobs for each non-deferred actor. - grp.Go(func() error { - defer close(jobCh) - log.Log(rt.INFO, "Creating migration jobs for tree %s", actorsRootIn) - if err = actorsIn.ForEach(func(addr address.Address, actorIn *Actor) error { - if _, ok := deferredCodeIDs[actorIn.Code]; ok { - return nil - } - - migration, ok := migrations[actorIn.Code] - if !ok { - return xerrors.Errorf("actor with code %s has no registered migration function", actorIn.Code) - } - - nextInput := &migrationJob{ - Address: addr, - Actor: *actorIn, // Must take a copy, the pointer is not stable. - cache: cache, - actorMigration: migration, - } - - select { - case jobCh <- nextInput: - case <-ctx.Done(): - return ctx.Err() - } - atomic.AddUint32(&jobCount, 1) - return nil - }); err != nil { - return err - } - log.Log(rt.INFO, "Done creating %d migration jobs for tree %s after %v", jobCount, actorsRootIn, time.Since(startTime)) - return nil - }) - - // Worker threads run jobs. 
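The job-producer goroutine above and the worker loop that follows form a standard fan-out/fan-in: one goroutine feeds jobCh, MaxWorkers goroutines drain it into jobResultCh, and a single writer goroutine persists the results. A stripped-down standalone sketch of that shape, with plain ints standing in for migration jobs (purely illustrative, not the vendored code):

package example

import (
    "context"
    "sync"

    "golang.org/x/sync/errgroup"
)

func fanOutFanIn(ctx context.Context, inputs []int, workers int) ([]int, error) {
    grp, ctx := errgroup.WithContext(ctx)
    jobCh := make(chan int)
    resultCh := make(chan int)

    // Producer: feed the jobs, then close the channel so workers can exit.
    grp.Go(func() error {
        defer close(jobCh)
        for _, in := range inputs {
            select {
            case jobCh <- in:
            case <-ctx.Done():
                return ctx.Err()
            }
        }
        return nil
    })

    // Workers: drain jobCh and push results; a WaitGroup tracks when all are done.
    var workerWg sync.WaitGroup
    for i := 0; i < workers; i++ {
        workerWg.Add(1)
        grp.Go(func() error {
            defer workerWg.Done()
            for job := range jobCh {
                select {
                case resultCh <- job * job: // stand-in for the real per-job work
                case <-ctx.Done():
                    return ctx.Err()
                }
            }
            return nil
        })
    }

    // Close the result channel once every worker has finished.
    grp.Go(func() error {
        workerWg.Wait()
        close(resultCh)
        return nil
    })

    // Single consumer collects results, as the result writer below does for the state tree.
    var out []int
    grp.Go(func() error {
        for r := range resultCh {
            out = append(out, r)
        }
        return nil
    })

    return out, grp.Wait()
}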
- var workerWg sync.WaitGroup - for i := uint(0); i < cfg.MaxWorkers; i++ { - workerWg.Add(1) - workerId := i - grp.Go(func() error { - defer workerWg.Done() - for job := range jobCh { - result, err := job.run(ctx, store, priorEpoch) - if err != nil { - return err - } - select { - case jobResultCh <- result: - case <-ctx.Done(): - return ctx.Err() - } - atomic.AddUint32(&doneCount, 1) - } - log.Log(rt.INFO, "Worker %d done", workerId) - return nil - }) - } - log.Log(rt.INFO, "Started %d workers", cfg.MaxWorkers) - - // Monitor the job queue. This non-critical goroutine is outside the errgroup and exits when - // workersFinished is closed, or the context done. - workersFinished := make(chan struct{}) // Closed when waitgroup is emptied. - if cfg.ProgressLogPeriod > 0 { - go func() { - defer log.Log(rt.DEBUG, "Job queue monitor done") - for { - select { - case <-time.After(cfg.ProgressLogPeriod): - jobsNow := jobCount // Snapshot values to avoid incorrect-looking arithmetic if they change. - doneNow := doneCount - pendingNow := jobsNow - doneNow - elapsed := time.Since(startTime) - rate := float64(doneNow) / elapsed.Seconds() - log.Log(rt.INFO, "%d jobs created, %d done, %d pending after %v (%.0f/s)", - jobsNow, doneNow, pendingNow, elapsed, rate) - case <-workersFinished: - return - case <-ctx.Done(): - return - } - } - }() - } - - // Close result channel when workers are done sending to it. - grp.Go(func() error { - workerWg.Wait() - close(jobResultCh) - close(workersFinished) - log.Log(rt.INFO, "All workers done after %v", time.Since(startTime)) - return nil - }) - - // Insert migrated records in output state tree and accumulators. - grp.Go(func() error { - log.Log(rt.INFO, "Result writer started") - resultCount := 0 - for result := range jobResultCh { - if err := actorsOut.SetActor(result.Address, &result.Actor); err != nil { - return err - } - resultCount++ - } - log.Log(rt.INFO, "Result writer wrote %d results to state tree after %v", resultCount, time.Since(startTime)) - return nil - }) - - if err := grp.Wait(); err != nil { - return cid.Undef, err - } - - elapsed := time.Since(startTime) - rate := float64(doneCount) / elapsed.Seconds() - log.Log(rt.INFO, "All %d done after %v (%.0f/s). Flushing state tree root.", doneCount, elapsed, rate) - return actorsOut.Flush() -} - -type actorMigrationInput struct { - address address.Address // actor's address - head cid.Cid - priorEpoch abi.ChainEpoch // epoch of last state transition prior to migration - cache MigrationCache // cache of existing cid -> cid migrations for this actor -} - -type actorMigrationResult struct { - newCodeCID cid.Cid - newHead cid.Cid -} - -type actorMigration interface { - // Loads an actor's state from an input store and writes new state to an output store. - // Returns the new state head CID. 
- migrateState(ctx context.Context, store cbor.IpldStore, input actorMigrationInput) (result *actorMigrationResult, err error) -} - -type migrationJob struct { - address.Address - Actor - actorMigration - cache MigrationCache -} - -type migrationJobResult struct { - address.Address - Actor -} - -func (job *migrationJob) run(ctx context.Context, store cbor.IpldStore, priorEpoch abi.ChainEpoch) (*migrationJobResult, error) { - result, err := job.migrateState(ctx, store, actorMigrationInput{ - address: job.Address, - head: job.Actor.Head, - priorEpoch: priorEpoch, - cache: job.cache, - }) - if err != nil { - return nil, xerrors.Errorf("state migration failed for actor code %s, addr %s: %w", - job.Actor.Code, job.Address, err) - } - - // Set up new actor record with the migrated state. - return &migrationJobResult{ - job.Address, // Unchanged - Actor{ - Code: result.newCodeCID, - Head: result.newHead, - CallSeqNum: job.Actor.CallSeqNum, // Unchanged - Balance: job.Actor.Balance, // Unchanged - }, - }, nil -} - -// Migrator which preserves the head CID and provides a fixed result code CID. -type codeMigrator struct { - OutCodeCID cid.Cid -} - -func (n codeMigrator) migrateState(_ context.Context, _ cbor.IpldStore, in actorMigrationInput) (*actorMigrationResult, error) { - return &actorMigrationResult{ - newCodeCID: n.OutCodeCID, - newHead: in.head, - }, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/tree.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/tree.go deleted file mode 100644 index 9b43ef2..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/tree.go +++ /dev/null @@ -1,96 +0,0 @@ -package migration - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -// Value type of the top level of the state tree. -// Represents the on-chain state of a single actor. -type Actor struct { - Code cid.Cid // CID representing the code associated with the actor - Head cid.Cid // CID of the head state object for the actor - CallSeqNum uint64 // CallSeqNum for the next message to be received by the actor (non-zero for accounts only) - Balance big.Int // Token balance of the actor -} - -// A specialization of a map of ID-addresses to actor heads. -type Tree struct { - Map *adt.Map - Store adt.Store -} - -// Initializes a new, empty state tree backed by a store. -func NewTree(store adt.Store) (*Tree, error) { - emptyMap, err := adt.MakeEmptyMap(store, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, err - } - return &Tree{ - Map: emptyMap, - Store: store, - }, nil -} - -// Loads a tree from a root CID and store. -func LoadTree(s adt.Store, r cid.Cid) (*Tree, error) { - m, err := adt.AsMap(s, r, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, err - } - return &Tree{ - Map: m, - Store: s, - }, nil -} - -// Writes the tree root node to the store, and returns its CID. -func (t *Tree) Flush() (cid.Cid, error) { - return t.Map.Root() -} - -// Loads the state associated with an address. 
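The Tree type above gives the migration a minimal state-tree view: ID-address keys, Actor values, backed by the same adt.Map machinery earlier in this diff. A short sketch of the write path (create, set one actor, flush), assuming the v9 migration package is importable from the upstream module; the address, CIDs, and balance are placeholder values:

package example

import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
    migration9 "github.com/filecoin-project/go-state-types/builtin/v9/migration"
    "github.com/ipfs/go-cid"
)

// writeActor records one actor under an ID address in a fresh tree and
// flushes the tree to a root CID.
func writeActor(store adt.Store, codeCID, headCID cid.Cid) (cid.Cid, error) {
    tree, err := migration9.NewTree(store)
    if err != nil {
        return cid.Undef, err
    }
    idAddr, err := address.NewIDAddress(1000) // SetActor accepts only ID addresses
    if err != nil {
        return cid.Undef, err
    }
    actor := migration9.Actor{
        Code:       codeCID,
        Head:       headCID,
        CallSeqNum: 0,
        Balance:    big.Zero(),
    }
    if err := tree.SetActor(idAddr, &actor); err != nil {
        return cid.Undef, err
    }
    return tree.Flush()
}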
-func (t *Tree) GetActor(addr address.Address) (*Actor, bool, error) { - if addr.Protocol() != address.ID { - return nil, false, xerrors.Errorf("non-ID address %v invalid as actor key", addr) - } - var actor Actor - found, err := t.Map.Get(abi.AddrKey(addr), &actor) - return &actor, found, err -} - -// Sets the state associated with an address, overwriting if it already present. -func (t *Tree) SetActor(addr address.Address, actor *Actor) error { - if addr.Protocol() != address.ID { - return xerrors.Errorf("non-ID address %v invalid as actor key", addr) - } - return t.Map.Put(abi.AddrKey(addr), actor) -} - -// Traverses all entries in the tree. -func (t *Tree) ForEach(fn func(addr address.Address, actor *Actor) error) error { - var val Actor - return t.Map.ForEach(&val, func(key string) error { - addr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - return fn(addr, &val) - }) -} - -// Traverses all keys in the tree, without decoding the actor states. -func (t *Tree) ForEachKey(fn func(addr address.Address) error) error { - return t.Map.ForEach(nil, func(key string) error { - addr, err := address.NewFromBytes([]byte(key)) - if err != nil { - return err - } - return fn(addr) - }) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/util.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/util.go deleted file mode 100644 index d5988a7..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/migration/util.go +++ /dev/null @@ -1,63 +0,0 @@ -package migration - -import ( - "sync" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -type MemMigrationCache struct { - MigrationMap sync.Map -} - -func NewMemMigrationCache() *MemMigrationCache { - return new(MemMigrationCache) -} - -func (m *MemMigrationCache) Write(key string, c cid.Cid) error { - m.MigrationMap.Store(key, c) - return nil -} - -func (m *MemMigrationCache) Read(key string) (bool, cid.Cid, error) { - val, found := m.MigrationMap.Load(key) - if !found { - return false, cid.Undef, nil - } - c, ok := val.(cid.Cid) - if !ok { - return false, cid.Undef, xerrors.Errorf("non cid value in cache") - } - - return true, c, nil -} - -func (m *MemMigrationCache) Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error) { - found, c, err := m.Read(key) - if err != nil { - return cid.Undef, err - } - if found { - return c, nil - } - c, err = loadFunc() - if err != nil { - return cid.Undef, err - } - m.MigrationMap.Store(key, c) - return c, nil -} - -func (m *MemMigrationCache) Clone() *MemMigrationCache { - newCache := NewMemMigrationCache() - newCache.Update(m) - return newCache -} - -func (m *MemMigrationCache) Update(other *MemMigrationCache) { - other.MigrationMap.Range(func(key, value interface{}) bool { - m.MigrationMap.Store(key, value) - return true - }) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/cbor_gen.go deleted file mode 100644 index 4af602c..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/cbor_gen.go +++ /dev/null @@ -1,6207 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
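For context on the MemMigrationCache deleted in util.go above: Load memoises a CID under a string key and invokes the supplied function only on a cache miss. The following is a minimal, illustrative sketch, assuming a made-up key and a fabricated result CID rather than anything taken from the removed migration code.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/builtin/v9/migration"
	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	cache := migration.NewMemMigrationCache()

	// Hypothetical cache key, chosen only for this example.
	key := "miner-f01000-head"

	migrate := func() (cid.Cid, error) {
		// Stand-in for an expensive state migration: fabricate a CID.
		h, err := mh.Sum([]byte("migrated state"), mh.SHA2_256, -1)
		if err != nil {
			return cid.Undef, err
		}
		return cid.NewCidV1(cid.Raw, h), nil
	}

	c1, err := cache.Load(key, migrate) // miss: runs migrate and stores the result
	if err != nil {
		panic(err)
	}
	c2, _ := cache.Load(key, migrate) // hit: returns the memoised CID
	fmt.Println(c1.Equals(c2))        // true
}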
- -package miner - -import ( - "fmt" - "io" - - address "github.com/filecoin-project/go-address" - abi "github.com/filecoin-project/go-state-types/abi" - proof "github.com/filecoin-project/go-state-types/proof" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufState = []byte{143} - -func (t *State) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Info (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Info); err != nil { - return xerrors.Errorf("failed to write cid field t.Info: %w", err) - } - - // t.PreCommitDeposits (big.Int) (struct) - if err := t.PreCommitDeposits.MarshalCBOR(w); err != nil { - return err - } - - // t.LockedFunds (big.Int) (struct) - if err := t.LockedFunds.MarshalCBOR(w); err != nil { - return err - } - - // t.VestingFunds (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.VestingFunds); err != nil { - return xerrors.Errorf("failed to write cid field t.VestingFunds: %w", err) - } - - // t.FeeDebt (big.Int) (struct) - if err := t.FeeDebt.MarshalCBOR(w); err != nil { - return err - } - - // t.InitialPledge (big.Int) (struct) - if err := t.InitialPledge.MarshalCBOR(w); err != nil { - return err - } - - // t.PreCommittedSectors (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PreCommittedSectors); err != nil { - return xerrors.Errorf("failed to write cid field t.PreCommittedSectors: %w", err) - } - - // t.PreCommittedSectorsCleanUp (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PreCommittedSectorsCleanUp); err != nil { - return xerrors.Errorf("failed to write cid field t.PreCommittedSectorsCleanUp: %w", err) - } - - // t.AllocatedSectors (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.AllocatedSectors); err != nil { - return xerrors.Errorf("failed to write cid field t.AllocatedSectors: %w", err) - } - - // t.Sectors (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Sectors); err != nil { - return xerrors.Errorf("failed to write cid field t.Sectors: %w", err) - } - - // t.ProvingPeriodStart (abi.ChainEpoch) (int64) - if t.ProvingPeriodStart >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ProvingPeriodStart)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ProvingPeriodStart-1)); err != nil { - return err - } - } - - // t.CurrentDeadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrentDeadline)); err != nil { - return err - } - - // t.Deadlines (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Deadlines); err != nil { - return xerrors.Errorf("failed to write cid field t.Deadlines: %w", err) - } - - // t.EarlyTerminations (bitfield.BitField) (struct) - if err := t.EarlyTerminations.MarshalCBOR(w); err != nil { - return err - } - - // t.DeadlineCronActive (bool) (bool) - if err := cbg.WriteBool(w, t.DeadlineCronActive); err != nil { - return err - } - return nil -} - -func (t *State) UnmarshalCBOR(r io.Reader) error { - *t = State{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of 
type array") - } - - if extra != 15 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Info (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Info: %w", err) - } - - t.Info = c - - } - // t.PreCommitDeposits (big.Int) (struct) - - { - - if err := t.PreCommitDeposits.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PreCommitDeposits: %w", err) - } - - } - // t.LockedFunds (big.Int) (struct) - - { - - if err := t.LockedFunds.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.LockedFunds: %w", err) - } - - } - // t.VestingFunds (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.VestingFunds: %w", err) - } - - t.VestingFunds = c - - } - // t.FeeDebt (big.Int) (struct) - - { - - if err := t.FeeDebt.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FeeDebt: %w", err) - } - - } - // t.InitialPledge (big.Int) (struct) - - { - - if err := t.InitialPledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.InitialPledge: %w", err) - } - - } - // t.PreCommittedSectors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PreCommittedSectors: %w", err) - } - - t.PreCommittedSectors = c - - } - // t.PreCommittedSectorsCleanUp (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PreCommittedSectorsCleanUp: %w", err) - } - - t.PreCommittedSectorsCleanUp = c - - } - // t.AllocatedSectors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.AllocatedSectors: %w", err) - } - - t.AllocatedSectors = c - - } - // t.Sectors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Sectors: %w", err) - } - - t.Sectors = c - - } - // t.ProvingPeriodStart (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ProvingPeriodStart = abi.ChainEpoch(extraI) - } - // t.CurrentDeadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.CurrentDeadline = uint64(extra) - - } - // t.Deadlines (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Deadlines: %w", err) - } - - t.Deadlines = c - - } - // t.EarlyTerminations (bitfield.BitField) (struct) - - { - - if err := t.EarlyTerminations.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.EarlyTerminations: %w", err) - } - - } - // t.DeadlineCronActive (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: 
- t.DeadlineCronActive = false - case 21: - t.DeadlineCronActive = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -var lengthBufMinerInfo = []byte{142} - -func (t *MinerInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufMinerInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.ControlAddresses ([]address.Address) (slice) - if len(t.ControlAddresses) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ControlAddresses was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddresses))); err != nil { - return err - } - for _, v := range t.ControlAddresses { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.PendingWorkerKey (miner.WorkerKeyChange) (struct) - if err := t.PendingWorkerKey.MarshalCBOR(w); err != nil { - return err - } - - // t.PeerId ([]uint8) (slice) - if len(t.PeerId) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.PeerId was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.PeerId))); err != nil { - return err - } - - if _, err := w.Write(t.PeerId[:]); err != nil { - return err - } - - // t.Multiaddrs ([][]uint8) (slice) - if len(t.Multiaddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Multiaddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { - return err - } - for _, v := range t.Multiaddrs { - if len(v) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { - return err - } - - if _, err := w.Write(v[:]); err != nil { - return err - } - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - if t.WindowPoStProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil { - return err - } - } - - // t.SectorSize (abi.SectorSize) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorSize)); err != nil { - return err - } - - // t.WindowPoStPartitionSectors (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStPartitionSectors)); err != nil { - return err - } - - // t.ConsensusFaultElapsed (abi.ChainEpoch) (int64) - if t.ConsensusFaultElapsed >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ConsensusFaultElapsed)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ConsensusFaultElapsed-1)); err != nil { - return err - } - } - - // t.PendingOwnerAddress (address.Address) (struct) - if err := t.PendingOwnerAddress.MarshalCBOR(w); err != nil { - return err - } - - // t.Beneficiary (address.Address) (struct) - if err := t.Beneficiary.MarshalCBOR(w); err 
!= nil { - return err - } - - // t.BeneficiaryTerm (miner.BeneficiaryTerm) (struct) - if err := t.BeneficiaryTerm.MarshalCBOR(w); err != nil { - return err - } - - // t.PendingBeneficiaryTerm (miner.PendingBeneficiaryChange) (struct) - if err := t.PendingBeneficiaryTerm.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *MinerInfo) UnmarshalCBOR(r io.Reader) error { - *t = MinerInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 14 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Owner: %w", err) - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Worker: %w", err) - } - - } - // t.ControlAddresses ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ControlAddresses: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ControlAddresses = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ControlAddresses[i] = v - } - - // t.PendingWorkerKey (miner.WorkerKeyChange) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.PendingWorkerKey = new(WorkerKeyChange) - if err := t.PendingWorkerKey.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PendingWorkerKey pointer: %w", err) - } - } - - } - // t.PeerId ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.PeerId: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.PeerId = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.PeerId[:]); err != nil { - return err - } - // t.Multiaddrs ([][]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Multiaddrs = make([][]uint8, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Multiaddrs[i] = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { - return err - } - } - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { 
- return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.WindowPoStProofType = abi.RegisteredPoStProof(extraI) - } - // t.SectorSize (abi.SectorSize) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorSize = abi.SectorSize(extra) - - } - // t.WindowPoStPartitionSectors (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.WindowPoStPartitionSectors = uint64(extra) - - } - // t.ConsensusFaultElapsed (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ConsensusFaultElapsed = abi.ChainEpoch(extraI) - } - // t.PendingOwnerAddress (address.Address) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.PendingOwnerAddress = new(address.Address) - if err := t.PendingOwnerAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PendingOwnerAddress pointer: %w", err) - } - } - - } - // t.Beneficiary (address.Address) (struct) - - { - - if err := t.Beneficiary.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Beneficiary: %w", err) - } - - } - // t.BeneficiaryTerm (miner.BeneficiaryTerm) (struct) - - { - - if err := t.BeneficiaryTerm.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.BeneficiaryTerm: %w", err) - } - - } - // t.PendingBeneficiaryTerm (miner.PendingBeneficiaryChange) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.PendingBeneficiaryTerm = new(PendingBeneficiaryChange) - if err := t.PendingBeneficiaryTerm.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PendingBeneficiaryTerm pointer: %w", err) - } - } - - } - return nil -} - -var lengthBufDeadlines = []byte{129} - -func (t *Deadlines) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeadlines); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Due ([48]cid.Cid) (array) - if len(t.Due) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Due was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Due))); err != nil { - return err - } - for _, v := range t.Due { - if err := cbg.WriteCidBuf(scratch, w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Due: %w", err) - } - } - return nil -} - 
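All of the signed int64 fields in the generated marshalers above (abi.ChainEpoch, abi.RegisteredPoStProof, and so on) repeat one convention: non-negative values are written as CBOR major type 0, negative values as major type 1 carrying -v-1, and the decoder reverses this with extraI = -1 - extraI. A sketch of that pattern as a standalone helper follows; the helper and its name are purely illustrative, since cbor-gen inlines this logic rather than exposing such a function.

package minercbor // illustrative helper, not part of the removed file

import (
	"io"

	cbg "github.com/whyrusleeping/cbor-gen"
)

// writeInt64 captures the signed-integer convention used by the generated
// code above: major type 0 for v >= 0, major type 1 holding -v-1 otherwise,
// matching CBOR's negative-integer encoding.
func writeInt64(scratch []byte, w io.Writer, v int64) error {
	if v >= 0 {
		return cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(v))
	}
	return cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-v-1))
}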
-func (t *Deadlines) UnmarshalCBOR(r io.Reader) error { - *t = Deadlines{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Due ([48]cid.Cid) (array) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Due: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra != 48 { - return fmt.Errorf("expected array to have 48 elements") - } - - t.Due = [48]cid.Cid{} - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("reading cid field t.Due failed: %w", err) - } - t.Due[i] = c - } - - return nil -} - -var lengthBufDeadline = []byte{139} - -func (t *Deadline) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeadline); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Partitions (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Partitions); err != nil { - return xerrors.Errorf("failed to write cid field t.Partitions: %w", err) - } - - // t.ExpirationsEpochs (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.ExpirationsEpochs); err != nil { - return xerrors.Errorf("failed to write cid field t.ExpirationsEpochs: %w", err) - } - - // t.PartitionsPoSted (bitfield.BitField) (struct) - if err := t.PartitionsPoSted.MarshalCBOR(w); err != nil { - return err - } - - // t.EarlyTerminations (bitfield.BitField) (struct) - if err := t.EarlyTerminations.MarshalCBOR(w); err != nil { - return err - } - - // t.LiveSectors (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LiveSectors)); err != nil { - return err - } - - // t.TotalSectors (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSectors)); err != nil { - return err - } - - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { - return err - } - - // t.OptimisticPoStSubmissions (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.OptimisticPoStSubmissions); err != nil { - return xerrors.Errorf("failed to write cid field t.OptimisticPoStSubmissions: %w", err) - } - - // t.SectorsSnapshot (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SectorsSnapshot); err != nil { - return xerrors.Errorf("failed to write cid field t.SectorsSnapshot: %w", err) - } - - // t.PartitionsSnapshot (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.PartitionsSnapshot); err != nil { - return xerrors.Errorf("failed to write cid field t.PartitionsSnapshot: %w", err) - } - - // t.OptimisticPoStSubmissionsSnapshot (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.OptimisticPoStSubmissionsSnapshot); err != nil { - return xerrors.Errorf("failed to write cid field t.OptimisticPoStSubmissionsSnapshot: %w", err) - } - - return nil -} - -func (t *Deadline) UnmarshalCBOR(r io.Reader) error { - *t = Deadline{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { 
- return fmt.Errorf("cbor input should be of type array") - } - - if extra != 11 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Partitions (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Partitions: %w", err) - } - - t.Partitions = c - - } - // t.ExpirationsEpochs (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ExpirationsEpochs: %w", err) - } - - t.ExpirationsEpochs = c - - } - // t.PartitionsPoSted (bitfield.BitField) (struct) - - { - - if err := t.PartitionsPoSted.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PartitionsPoSted: %w", err) - } - - } - // t.EarlyTerminations (bitfield.BitField) (struct) - - { - - if err := t.EarlyTerminations.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.EarlyTerminations: %w", err) - } - - } - // t.LiveSectors (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.LiveSectors = uint64(extra) - - } - // t.TotalSectors (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TotalSectors = uint64(extra) - - } - // t.FaultyPower (miner.PowerPair) (struct) - - { - - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) - } - - } - // t.OptimisticPoStSubmissions (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.OptimisticPoStSubmissions: %w", err) - } - - t.OptimisticPoStSubmissions = c - - } - // t.SectorsSnapshot (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SectorsSnapshot: %w", err) - } - - t.SectorsSnapshot = c - - } - // t.PartitionsSnapshot (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PartitionsSnapshot: %w", err) - } - - t.PartitionsSnapshot = c - - } - // t.OptimisticPoStSubmissionsSnapshot (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.OptimisticPoStSubmissionsSnapshot: %w", err) - } - - t.OptimisticPoStSubmissionsSnapshot = c - - } - return nil -} - -var lengthBufPartition = []byte{139} - -func (t *Partition) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPartition); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - - // t.Unproven (bitfield.BitField) (struct) - if err := t.Unproven.MarshalCBOR(w); err != nil { - return err - } - - // t.Faults (bitfield.BitField) (struct) - if err := t.Faults.MarshalCBOR(w); err != nil { - return err - } - - // t.Recoveries (bitfield.BitField) (struct) - if err := t.Recoveries.MarshalCBOR(w); err != nil { - return err - } - - // t.Terminated (bitfield.BitField) (struct) - if err := t.Terminated.MarshalCBOR(w); err != nil { - return err - } - - // t.ExpirationsEpochs (cid.Cid) (struct) - - if err := 
cbg.WriteCidBuf(scratch, w, t.ExpirationsEpochs); err != nil { - return xerrors.Errorf("failed to write cid field t.ExpirationsEpochs: %w", err) - } - - // t.EarlyTerminated (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.EarlyTerminated); err != nil { - return xerrors.Errorf("failed to write cid field t.EarlyTerminated: %w", err) - } - - // t.LivePower (miner.PowerPair) (struct) - if err := t.LivePower.MarshalCBOR(w); err != nil { - return err - } - - // t.UnprovenPower (miner.PowerPair) (struct) - if err := t.UnprovenPower.MarshalCBOR(w); err != nil { - return err - } - - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { - return err - } - - // t.RecoveringPower (miner.PowerPair) (struct) - if err := t.RecoveringPower.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Partition) UnmarshalCBOR(r io.Reader) error { - *t = Partition{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 11 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - // t.Unproven (bitfield.BitField) (struct) - - { - - if err := t.Unproven.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Unproven: %w", err) - } - - } - // t.Faults (bitfield.BitField) (struct) - - { - - if err := t.Faults.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Faults: %w", err) - } - - } - // t.Recoveries (bitfield.BitField) (struct) - - { - - if err := t.Recoveries.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Recoveries: %w", err) - } - - } - // t.Terminated (bitfield.BitField) (struct) - - { - - if err := t.Terminated.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Terminated: %w", err) - } - - } - // t.ExpirationsEpochs (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ExpirationsEpochs: %w", err) - } - - t.ExpirationsEpochs = c - - } - // t.EarlyTerminated (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.EarlyTerminated: %w", err) - } - - t.EarlyTerminated = c - - } - // t.LivePower (miner.PowerPair) (struct) - - { - - if err := t.LivePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.LivePower: %w", err) - } - - } - // t.UnprovenPower (miner.PowerPair) (struct) - - { - - if err := t.UnprovenPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.UnprovenPower: %w", err) - } - - } - // t.FaultyPower (miner.PowerPair) (struct) - - { - - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) - } - - } - // t.RecoveringPower (miner.PowerPair) (struct) - - { - - if err := t.RecoveringPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RecoveringPower: %w", err) - } - - } - return nil -} - -var lengthBufExpirationSet = []byte{133} - -func (t *ExpirationSet) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := 
w.Write(lengthBufExpirationSet); err != nil { - return err - } - - // t.OnTimeSectors (bitfield.BitField) (struct) - if err := t.OnTimeSectors.MarshalCBOR(w); err != nil { - return err - } - - // t.EarlySectors (bitfield.BitField) (struct) - if err := t.EarlySectors.MarshalCBOR(w); err != nil { - return err - } - - // t.OnTimePledge (big.Int) (struct) - if err := t.OnTimePledge.MarshalCBOR(w); err != nil { - return err - } - - // t.ActivePower (miner.PowerPair) (struct) - if err := t.ActivePower.MarshalCBOR(w); err != nil { - return err - } - - // t.FaultyPower (miner.PowerPair) (struct) - if err := t.FaultyPower.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *ExpirationSet) UnmarshalCBOR(r io.Reader) error { - *t = ExpirationSet{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.OnTimeSectors (bitfield.BitField) (struct) - - { - - if err := t.OnTimeSectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.OnTimeSectors: %w", err) - } - - } - // t.EarlySectors (bitfield.BitField) (struct) - - { - - if err := t.EarlySectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.EarlySectors: %w", err) - } - - } - // t.OnTimePledge (big.Int) (struct) - - { - - if err := t.OnTimePledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.OnTimePledge: %w", err) - } - - } - // t.ActivePower (miner.PowerPair) (struct) - - { - - if err := t.ActivePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ActivePower: %w", err) - } - - } - // t.FaultyPower (miner.PowerPair) (struct) - - { - - if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) - } - - } - return nil -} - -var lengthBufPowerPair = []byte{130} - -func (t *PowerPair) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPowerPair); err != nil { - return err - } - - // t.Raw (big.Int) (struct) - if err := t.Raw.MarshalCBOR(w); err != nil { - return err - } - - // t.QA (big.Int) (struct) - if err := t.QA.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *PowerPair) UnmarshalCBOR(r io.Reader) error { - *t = PowerPair{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Raw (big.Int) (struct) - - { - - if err := t.Raw.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Raw: %w", err) - } - - } - // t.QA (big.Int) (struct) - - { - - if err := t.QA.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QA: %w", err) - } - - } - return nil -} - -var lengthBufSectorPreCommitOnChainInfo = []byte{131} - -func (t *SectorPreCommitOnChainInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorPreCommitOnChainInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Info 
(miner.SectorPreCommitInfo) (struct) - if err := t.Info.MarshalCBOR(w); err != nil { - return err - } - - // t.PreCommitDeposit (big.Int) (struct) - if err := t.PreCommitDeposit.MarshalCBOR(w); err != nil { - return err - } - - // t.PreCommitEpoch (abi.ChainEpoch) (int64) - if t.PreCommitEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PreCommitEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.PreCommitEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *SectorPreCommitOnChainInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorPreCommitOnChainInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Info (miner.SectorPreCommitInfo) (struct) - - { - - if err := t.Info.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Info: %w", err) - } - - } - // t.PreCommitDeposit (big.Int) (struct) - - { - - if err := t.PreCommitDeposit.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PreCommitDeposit: %w", err) - } - - } - // t.PreCommitEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.PreCommitEpoch = abi.ChainEpoch(extraI) - } - return nil -} - -var lengthBufSectorPreCommitInfo = []byte{135} - -func (t *SectorPreCommitInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorPreCommitInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - // t.SealRandEpoch (abi.ChainEpoch) (int64) - if t.SealRandEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealRandEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealRandEpoch-1)); err != nil { - return err - } - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, 
uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.Expiration (abi.ChainEpoch) (int64) - if t.Expiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { - return err - } - } - - // t.UnsealedCid (cid.Cid) (struct) - - if t.UnsealedCid == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.UnsealedCid); err != nil { - return xerrors.Errorf("failed to write cid field t.UnsealedCid: %w", err) - } - } - - return nil -} - -func (t *SectorPreCommitInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorPreCommitInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - // t.SealRandEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealRandEpoch = abi.ChainEpoch(extraI) - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got 
%d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.Expiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Expiration = abi.ChainEpoch(extraI) - } - // t.UnsealedCid (cid.Cid) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.UnsealedCid: %w", err) - } - - t.UnsealedCid = &c - } - - } - return nil -} - -var lengthBufSectorOnChainInfo = []byte{142} - -func (t *SectorOnChainInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorOnChainInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.Activation (abi.ChainEpoch) (int64) - if t.Activation >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Activation)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Activation-1)); err != nil { - return err - } - } - - // t.Expiration (abi.ChainEpoch) (int64) - if t.Expiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { - return err - } - } - - // t.DealWeight (big.Int) (struct) - if err := t.DealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.VerifiedDealWeight (big.Int) (struct) - if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { - return err - } - - // t.InitialPledge (big.Int) (struct) - if err := t.InitialPledge.MarshalCBOR(w); err != nil { - return err - } - - // t.ExpectedDayReward (big.Int) (struct) - if err := t.ExpectedDayReward.MarshalCBOR(w); err != nil { - return err - } - - // 
t.ExpectedStoragePledge (big.Int) (struct) - if err := t.ExpectedStoragePledge.MarshalCBOR(w); err != nil { - return err - } - - // t.ReplacedSectorAge (abi.ChainEpoch) (int64) - if t.ReplacedSectorAge >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplacedSectorAge)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ReplacedSectorAge-1)); err != nil { - return err - } - } - - // t.ReplacedDayReward (big.Int) (struct) - if err := t.ReplacedDayReward.MarshalCBOR(w); err != nil { - return err - } - - // t.SectorKeyCID (cid.Cid) (struct) - - if t.SectorKeyCID == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.SectorKeyCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SectorKeyCID: %w", err) - } - } - - return nil -} - -func (t *SectorOnChainInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorOnChainInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 14 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.Activation (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - 
extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Activation = abi.ChainEpoch(extraI) - } - // t.Expiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Expiration = abi.ChainEpoch(extraI) - } - // t.DealWeight (big.Int) (struct) - - { - - if err := t.DealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) - } - - } - // t.VerifiedDealWeight (big.Int) (struct) - - { - - if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) - } - - } - // t.InitialPledge (big.Int) (struct) - - { - - if err := t.InitialPledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.InitialPledge: %w", err) - } - - } - // t.ExpectedDayReward (big.Int) (struct) - - { - - if err := t.ExpectedDayReward.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ExpectedDayReward: %w", err) - } - - } - // t.ExpectedStoragePledge (big.Int) (struct) - - { - - if err := t.ExpectedStoragePledge.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ExpectedStoragePledge: %w", err) - } - - } - // t.ReplacedSectorAge (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ReplacedSectorAge = abi.ChainEpoch(extraI) - } - // t.ReplacedDayReward (big.Int) (struct) - - { - - if err := t.ReplacedDayReward.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ReplacedDayReward: %w", err) - } - - } - // t.SectorKeyCID (cid.Cid) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SectorKeyCID: %w", err) - } - - t.SectorKeyCID = &c - } - - } - return nil -} - -var lengthBufWorkerKeyChange = []byte{130} - -func (t *WorkerKeyChange) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWorkerKeyChange); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewWorker (address.Address) (struct) - if err := t.NewWorker.MarshalCBOR(w); err != nil { - return err - } - - // t.EffectiveAt (abi.ChainEpoch) (int64) - if t.EffectiveAt >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EffectiveAt)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EffectiveAt-1)); err != nil { - return err - } - } - return nil -} - 
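These generated (de)serializers are normally exercised as a pair through any io.Writer/io.Reader. A minimal round-trip sketch using WorkerKeyChange, whose MarshalCBOR appears just above and whose UnmarshalCBOR follows below; the ID address and epoch are arbitrary values chosen for illustration.

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin/v9/miner"
)

func main() {
	worker, err := address.NewIDAddress(1001) // arbitrary ID address
	if err != nil {
		panic(err)
	}
	in := miner.WorkerKeyChange{
		NewWorker:   worker,
		EffectiveAt: abi.ChainEpoch(2383680), // arbitrary epoch
	}

	// Encode with the generated marshaler, then decode into a fresh value.
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}
	var out miner.WorkerKeyChange
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.NewWorker, out.EffectiveAt) // prints the decoded fields
}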
-func (t *WorkerKeyChange) UnmarshalCBOR(r io.Reader) error { - *t = WorkerKeyChange{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewWorker (address.Address) (struct) - - { - - if err := t.NewWorker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewWorker: %w", err) - } - - } - // t.EffectiveAt (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EffectiveAt = abi.ChainEpoch(extraI) - } - return nil -} - -var lengthBufVestingFunds = []byte{129} - -func (t *VestingFunds) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufVestingFunds); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Funds ([]miner.VestingFund) (slice) - if len(t.Funds) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Funds was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Funds))); err != nil { - return err - } - for _, v := range t.Funds { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *VestingFunds) UnmarshalCBOR(r io.Reader) error { - *t = VestingFunds{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Funds ([]miner.VestingFund) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Funds: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Funds = make([]VestingFund, extra) - } - - for i := 0; i < int(extra); i++ { - - var v VestingFund - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Funds[i] = v - } - - return nil -} - -var lengthBufVestingFund = []byte{130} - -func (t *VestingFund) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufVestingFund); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Epoch (abi.ChainEpoch) (int64) - if t.Epoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { - return err - } - } - - // t.Amount (big.Int) (struct) - if err := t.Amount.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *VestingFund) UnmarshalCBOR(r io.Reader) error { - *t = 
VestingFund{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Epoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Epoch = abi.ChainEpoch(extraI) - } - // t.Amount (big.Int) (struct) - - { - - if err := t.Amount.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Amount: %w", err) - } - - } - return nil -} - -var lengthBufWindowedPoSt = []byte{130} - -func (t *WindowedPoSt) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWindowedPoSt); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Partitions (bitfield.BitField) (struct) - if err := t.Partitions.MarshalCBOR(w); err != nil { - return err - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *WindowedPoSt) UnmarshalCBOR(r io.Reader) error { - *t = WindowedPoSt{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Partitions (bitfield.BitField) (struct) - - { - - if err := t.Partitions.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Partitions: %w", err) - } - - } - // t.Proofs ([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]proof.PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v proof.PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - return nil -} - -var lengthBufActiveBeneficiary = []byte{130} - -func (t *ActiveBeneficiary) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufActiveBeneficiary); err != nil { - return err - } - - // t.Beneficiary (address.Address) (struct) - if err := t.Beneficiary.MarshalCBOR(w); err != nil { - return err - } - - // t.Term (miner.BeneficiaryTerm) (struct) - if err := t.Term.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *ActiveBeneficiary) UnmarshalCBOR(r io.Reader) error { 
- *t = ActiveBeneficiary{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Beneficiary (address.Address) (struct) - - { - - if err := t.Beneficiary.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Beneficiary: %w", err) - } - - } - // t.Term (miner.BeneficiaryTerm) (struct) - - { - - if err := t.Term.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Term: %w", err) - } - - } - return nil -} - -var lengthBufBeneficiaryTerm = []byte{131} - -func (t *BeneficiaryTerm) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufBeneficiaryTerm); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Quota (big.Int) (struct) - if err := t.Quota.MarshalCBOR(w); err != nil { - return err - } - - // t.UsedQuota (big.Int) (struct) - if err := t.UsedQuota.MarshalCBOR(w); err != nil { - return err - } - - // t.Expiration (abi.ChainEpoch) (int64) - if t.Expiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { - return err - } - } - return nil -} - -func (t *BeneficiaryTerm) UnmarshalCBOR(r io.Reader) error { - *t = BeneficiaryTerm{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Quota (big.Int) (struct) - - { - - if err := t.Quota.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Quota: %w", err) - } - - } - // t.UsedQuota (big.Int) (struct) - - { - - if err := t.UsedQuota.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.UsedQuota: %w", err) - } - - } - // t.Expiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Expiration = abi.ChainEpoch(extraI) - } - return nil -} - -var lengthBufPendingBeneficiaryChange = []byte{133} - -func (t *PendingBeneficiaryChange) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPendingBeneficiaryChange); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewBeneficiary (address.Address) (struct) - if err := t.NewBeneficiary.MarshalCBOR(w); err != nil { - return err - } - - // t.NewQuota (big.Int) (struct) - if err := t.NewQuota.MarshalCBOR(w); err != nil { - return err - } - - // t.NewExpiration (abi.ChainEpoch) (int64) - if t.NewExpiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, 
w, cbg.MajUnsignedInt, uint64(t.NewExpiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.NewExpiration-1)); err != nil { - return err - } - } - - // t.ApprovedByBeneficiary (bool) (bool) - if err := cbg.WriteBool(w, t.ApprovedByBeneficiary); err != nil { - return err - } - - // t.ApprovedByNominee (bool) (bool) - if err := cbg.WriteBool(w, t.ApprovedByNominee); err != nil { - return err - } - return nil -} - -func (t *PendingBeneficiaryChange) UnmarshalCBOR(r io.Reader) error { - *t = PendingBeneficiaryChange{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewBeneficiary (address.Address) (struct) - - { - - if err := t.NewBeneficiary.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewBeneficiary: %w", err) - } - - } - // t.NewQuota (big.Int) (struct) - - { - - if err := t.NewQuota.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewQuota: %w", err) - } - - } - // t.NewExpiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.NewExpiration = abi.ChainEpoch(extraI) - } - // t.ApprovedByBeneficiary (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.ApprovedByBeneficiary = false - case 21: - t.ApprovedByBeneficiary = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.ApprovedByNominee (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.ApprovedByNominee = false - case 21: - t.ApprovedByNominee = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -var lengthBufSubmitWindowedPoStParams = []byte{133} - -func (t *SubmitWindowedPoStParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSubmitWindowedPoStParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partitions ([]miner.PoStPartition) (slice) - if len(t.Partitions) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Partitions was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Partitions))); err != nil { - return err - } - for _, v := range t.Partitions { - if err := v.MarshalCBOR(w); err != nil 
{ - return err - } - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.ChainCommitEpoch (abi.ChainEpoch) (int64) - if t.ChainCommitEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ChainCommitEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ChainCommitEpoch-1)); err != nil { - return err - } - } - - // t.ChainCommitRand (abi.Randomness) (slice) - if len(t.ChainCommitRand) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.ChainCommitRand was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ChainCommitRand))); err != nil { - return err - } - - if _, err := w.Write(t.ChainCommitRand[:]); err != nil { - return err - } - return nil -} - -func (t *SubmitWindowedPoStParams) UnmarshalCBOR(r io.Reader) error { - *t = SubmitWindowedPoStParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partitions ([]miner.PoStPartition) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Partitions: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Partitions = make([]PoStPartition, extra) - } - - for i := 0; i < int(extra); i++ { - - var v PoStPartition - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Partitions[i] = v - } - - // t.Proofs ([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]proof.PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v proof.PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - // t.ChainCommitEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ChainCommitEpoch = abi.ChainEpoch(extraI) - } - // t.ChainCommitRand (abi.Randomness) (slice) - - maj, 
extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.ChainCommitRand: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.ChainCommitRand = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.ChainCommitRand[:]); err != nil { - return err - } - return nil -} - -var lengthBufTerminateSectorsParams = []byte{129} - -func (t *TerminateSectorsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufTerminateSectorsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Terminations ([]miner.TerminationDeclaration) (slice) - if len(t.Terminations) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Terminations was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Terminations))); err != nil { - return err - } - for _, v := range t.Terminations { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *TerminateSectorsParams) UnmarshalCBOR(r io.Reader) error { - *t = TerminateSectorsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Terminations ([]miner.TerminationDeclaration) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Terminations: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Terminations = make([]TerminationDeclaration, extra) - } - - for i := 0; i < int(extra); i++ { - - var v TerminationDeclaration - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Terminations[i] = v - } - - return nil -} - -var lengthBufTerminateSectorsReturn = []byte{129} - -func (t *TerminateSectorsReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufTerminateSectorsReturn); err != nil { - return err - } - - // t.Done (bool) (bool) - if err := cbg.WriteBool(w, t.Done); err != nil { - return err - } - return nil -} - -func (t *TerminateSectorsReturn) UnmarshalCBOR(r io.Reader) error { - *t = TerminateSectorsReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Done (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Done = false - case 21: - t.Done = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -var lengthBufChangePeerIDParams = []byte{129} - -func (t *ChangePeerIDParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err 
:= w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChangePeerIDParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewID ([]uint8) (slice) - if len(t.NewID) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.NewID was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.NewID))); err != nil { - return err - } - - if _, err := w.Write(t.NewID[:]); err != nil { - return err - } - return nil -} - -func (t *ChangePeerIDParams) UnmarshalCBOR(r io.Reader) error { - *t = ChangePeerIDParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewID ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.NewID: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.NewID = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.NewID[:]); err != nil { - return err - } - return nil -} - -var lengthBufChangeMultiaddrsParams = []byte{129} - -func (t *ChangeMultiaddrsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChangeMultiaddrsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewMultiaddrs ([][]uint8) (slice) - if len(t.NewMultiaddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.NewMultiaddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.NewMultiaddrs))); err != nil { - return err - } - for _, v := range t.NewMultiaddrs { - if len(v) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { - return err - } - - if _, err := w.Write(v[:]); err != nil { - return err - } - } - return nil -} - -func (t *ChangeMultiaddrsParams) UnmarshalCBOR(r io.Reader) error { - *t = ChangeMultiaddrsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewMultiaddrs ([][]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.NewMultiaddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.NewMultiaddrs = make([][]uint8, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.NewMultiaddrs[i]: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - 
t.NewMultiaddrs[i] = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.NewMultiaddrs[i][:]); err != nil { - return err - } - } - } - - return nil -} - -var lengthBufProveCommitSectorParams = []byte{130} - -func (t *ProveCommitSectorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufProveCommitSectorParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Proof))); err != nil { - return err - } - - if _, err := w.Write(t.Proof[:]); err != nil { - return err - } - return nil -} - -func (t *ProveCommitSectorParams) UnmarshalCBOR(r io.Reader) error { - *t = ProveCommitSectorParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Proof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Proof[:]); err != nil { - return err - } - return nil -} - -var lengthBufProveCommitAggregateParams = []byte{130} - -func (t *ProveCommitAggregateParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufProveCommitAggregateParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumbers (bitfield.BitField) (struct) - if err := t.SectorNumbers.MarshalCBOR(w); err != nil { - return err - } - - // t.AggregateProof ([]uint8) (slice) - if len(t.AggregateProof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.AggregateProof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.AggregateProof))); err != nil { - return err - } - - if _, err := w.Write(t.AggregateProof[:]); err != nil { - return err - } - return nil -} - -func (t *ProveCommitAggregateParams) UnmarshalCBOR(r io.Reader) error { - *t = ProveCommitAggregateParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumbers (bitfield.BitField) (struct) - - { - - if err := 
t.SectorNumbers.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.SectorNumbers: %w", err) - } - - } - // t.AggregateProof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.AggregateProof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.AggregateProof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.AggregateProof[:]); err != nil { - return err - } - return nil -} - -var lengthBufChangeWorkerAddressParams = []byte{130} - -func (t *ChangeWorkerAddressParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChangeWorkerAddressParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewWorker (address.Address) (struct) - if err := t.NewWorker.MarshalCBOR(w); err != nil { - return err - } - - // t.NewControlAddrs ([]address.Address) (slice) - if len(t.NewControlAddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.NewControlAddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.NewControlAddrs))); err != nil { - return err - } - for _, v := range t.NewControlAddrs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ChangeWorkerAddressParams) UnmarshalCBOR(r io.Reader) error { - *t = ChangeWorkerAddressParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewWorker (address.Address) (struct) - - { - - if err := t.NewWorker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewWorker: %w", err) - } - - } - // t.NewControlAddrs ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.NewControlAddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.NewControlAddrs = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.NewControlAddrs[i] = v - } - - return nil -} - -var lengthBufExtendSectorExpirationParams = []byte{129} - -func (t *ExtendSectorExpirationParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufExtendSectorExpirationParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Extensions ([]miner.ExpirationExtension) (slice) - if len(t.Extensions) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Extensions was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Extensions))); err != nil { - return err - } - for _, v := range t.Extensions { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ExtendSectorExpirationParams) UnmarshalCBOR(r io.Reader) error { - *t = ExtendSectorExpirationParams{} - - br := 
cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Extensions ([]miner.ExpirationExtension) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Extensions: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Extensions = make([]ExpirationExtension, extra) - } - - for i := 0; i < int(extra); i++ { - - var v ExpirationExtension - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Extensions[i] = v - } - - return nil -} - -var lengthBufDeclareFaultsParams = []byte{129} - -func (t *DeclareFaultsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeclareFaultsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Faults ([]miner.FaultDeclaration) (slice) - if len(t.Faults) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Faults was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Faults))); err != nil { - return err - } - for _, v := range t.Faults { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *DeclareFaultsParams) UnmarshalCBOR(r io.Reader) error { - *t = DeclareFaultsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Faults ([]miner.FaultDeclaration) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Faults: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Faults = make([]FaultDeclaration, extra) - } - - for i := 0; i < int(extra); i++ { - - var v FaultDeclaration - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Faults[i] = v - } - - return nil -} - -var lengthBufDeclareFaultsRecoveredParams = []byte{129} - -func (t *DeclareFaultsRecoveredParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDeclareFaultsRecoveredParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Recoveries ([]miner.RecoveryDeclaration) (slice) - if len(t.Recoveries) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Recoveries was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Recoveries))); err != nil { - return err - } - for _, v := range t.Recoveries { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *DeclareFaultsRecoveredParams) UnmarshalCBOR(r io.Reader) error { - *t = DeclareFaultsRecoveredParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - 
return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Recoveries ([]miner.RecoveryDeclaration) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Recoveries: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Recoveries = make([]RecoveryDeclaration, extra) - } - - for i := 0; i < int(extra); i++ { - - var v RecoveryDeclaration - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Recoveries[i] = v - } - - return nil -} - -var lengthBufReportConsensusFaultParams = []byte{131} - -func (t *ReportConsensusFaultParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufReportConsensusFaultParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.BlockHeader1 ([]uint8) (slice) - if len(t.BlockHeader1) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.BlockHeader1 was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeader1))); err != nil { - return err - } - - if _, err := w.Write(t.BlockHeader1[:]); err != nil { - return err - } - - // t.BlockHeader2 ([]uint8) (slice) - if len(t.BlockHeader2) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.BlockHeader2 was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeader2))); err != nil { - return err - } - - if _, err := w.Write(t.BlockHeader2[:]); err != nil { - return err - } - - // t.BlockHeaderExtra ([]uint8) (slice) - if len(t.BlockHeaderExtra) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.BlockHeaderExtra was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeaderExtra))); err != nil { - return err - } - - if _, err := w.Write(t.BlockHeaderExtra[:]); err != nil { - return err - } - return nil -} - -func (t *ReportConsensusFaultParams) UnmarshalCBOR(r io.Reader) error { - *t = ReportConsensusFaultParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BlockHeader1 ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.BlockHeader1: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.BlockHeader1 = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.BlockHeader1[:]); err != nil { - return err - } - // t.BlockHeader2 ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.BlockHeader2: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.BlockHeader2 = make([]uint8, extra) - } - - if _, err 
:= io.ReadFull(br, t.BlockHeader2[:]); err != nil { - return err - } - // t.BlockHeaderExtra ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.BlockHeaderExtra: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.BlockHeaderExtra = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.BlockHeaderExtra[:]); err != nil { - return err - } - return nil -} - -var lengthBufGetControlAddressesReturn = []byte{131} - -func (t *GetControlAddressesReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufGetControlAddressesReturn); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.ControlAddrs ([]address.Address) (slice) - if len(t.ControlAddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ControlAddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddrs))); err != nil { - return err - } - for _, v := range t.ControlAddrs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *GetControlAddressesReturn) UnmarshalCBOR(r io.Reader) error { - *t = GetControlAddressesReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Owner: %w", err) - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Worker: %w", err) - } - - } - // t.ControlAddrs ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ControlAddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ControlAddrs = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ControlAddrs[i] = v - } - - return nil -} - -var lengthBufCheckSectorProvenParams = []byte{129} - -func (t *CheckSectorProvenParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCheckSectorProvenParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - return nil -} - -func (t *CheckSectorProvenParams) UnmarshalCBOR(r io.Reader) error { - *t = CheckSectorProvenParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := 
cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - return nil -} - -var lengthBufWithdrawBalanceParams = []byte{129} - -func (t *WithdrawBalanceParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWithdrawBalanceParams); err != nil { - return err - } - - // t.AmountRequested (big.Int) (struct) - if err := t.AmountRequested.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *WithdrawBalanceParams) UnmarshalCBOR(r io.Reader) error { - *t = WithdrawBalanceParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.AmountRequested (big.Int) (struct) - - { - - if err := t.AmountRequested.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.AmountRequested: %w", err) - } - - } - return nil -} - -var lengthBufCompactPartitionsParams = []byte{130} - -func (t *CompactPartitionsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCompactPartitionsParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partitions (bitfield.BitField) (struct) - if err := t.Partitions.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CompactPartitionsParams) UnmarshalCBOR(r io.Reader) error { - *t = CompactPartitionsParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partitions (bitfield.BitField) (struct) - - { - - if err := t.Partitions.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Partitions: %w", err) - } - - } - return nil -} - -var lengthBufCompactSectorNumbersParams = []byte{129} - -func (t *CompactSectorNumbersParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCompactSectorNumbersParams); err != nil { - return err - } - - // t.MaskSectorNumbers (bitfield.BitField) (struct) - if err := t.MaskSectorNumbers.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CompactSectorNumbersParams) 
UnmarshalCBOR(r io.Reader) error { - *t = CompactSectorNumbersParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.MaskSectorNumbers (bitfield.BitField) (struct) - - { - - if err := t.MaskSectorNumbers.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.MaskSectorNumbers: %w", err) - } - - } - return nil -} - -var lengthBufCronEventPayload = []byte{129} - -func (t *CronEventPayload) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCronEventPayload); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.EventType (miner.CronEventType) (int64) - if t.EventType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EventType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EventType-1)); err != nil { - return err - } - } - return nil -} - -func (t *CronEventPayload) UnmarshalCBOR(r io.Reader) error { - *t = CronEventPayload{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.EventType (miner.CronEventType) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EventType = CronEventType(extraI) - } - return nil -} - -var lengthBufDisputeWindowedPoStParams = []byte{130} - -func (t *DisputeWindowedPoStParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufDisputeWindowedPoStParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.PoStIndex (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PoStIndex)); err != nil { - return err - } - - return nil -} - -func (t *DisputeWindowedPoStParams) UnmarshalCBOR(r io.Reader) error { - *t = DisputeWindowedPoStParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 
field") - } - t.Deadline = uint64(extra) - - } - // t.PoStIndex (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PoStIndex = uint64(extra) - - } - return nil -} - -var lengthBufPreCommitSectorBatchParams = []byte{129} - -func (t *PreCommitSectorBatchParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPreCommitSectorBatchParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Sectors ([]miner.PreCommitSectorParams) (slice) - if len(t.Sectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Sectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sectors))); err != nil { - return err - } - for _, v := range t.Sectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *PreCommitSectorBatchParams) UnmarshalCBOR(r io.Reader) error { - *t = PreCommitSectorBatchParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sectors ([]miner.PreCommitSectorParams) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Sectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Sectors = make([]PreCommitSectorParams, extra) - } - - for i := 0; i < int(extra); i++ { - - var v PreCommitSectorParams - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Sectors[i] = v - } - - return nil -} - -var lengthBufPreCommitSectorBatchParams2 = []byte{129} - -func (t *PreCommitSectorBatchParams2) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPreCommitSectorBatchParams2); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Sectors ([]miner.SectorPreCommitInfo) (slice) - if len(t.Sectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Sectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sectors))); err != nil { - return err - } - for _, v := range t.Sectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *PreCommitSectorBatchParams2) UnmarshalCBOR(r io.Reader) error { - *t = PreCommitSectorBatchParams2{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sectors ([]miner.SectorPreCommitInfo) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Sectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - 
t.Sectors = make([]SectorPreCommitInfo, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorPreCommitInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Sectors[i] = v - } - - return nil -} - -var lengthBufPreCommitSectorParams = []byte{138} - -func (t *PreCommitSectorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPreCommitSectorParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - // t.SealRandEpoch (abi.ChainEpoch) (int64) - if t.SealRandEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealRandEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealRandEpoch-1)); err != nil { - return err - } - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.Expiration (abi.ChainEpoch) (int64) - if t.Expiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { - return err - } - } - - // t.ReplaceCapacity (bool) (bool) - if err := cbg.WriteBool(w, t.ReplaceCapacity); err != nil { - return err - } - - // t.ReplaceSectorDeadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorDeadline)); err != nil { - return err - } - - // t.ReplaceSectorPartition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorPartition)); err != nil { - return err - } - - // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorNumber)); err != nil { - return err - } - - return nil -} - -func (t *PreCommitSectorParams) UnmarshalCBOR(r io.Reader) error { - *t = PreCommitSectorParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 10 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := 
cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - // t.SealRandEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealRandEpoch = abi.ChainEpoch(extraI) - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.Expiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Expiration = abi.ChainEpoch(extraI) - } - // t.ReplaceCapacity (bool) (bool) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.ReplaceCapacity = false - case 21: - t.ReplaceCapacity = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.ReplaceSectorDeadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorDeadline = uint64(extra) - - } - // t.ReplaceSectorPartition (uint64) 
(uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorPartition = uint64(extra) - - } - // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReplaceSectorNumber = abi.SectorNumber(extra) - - } - return nil -} - -var lengthBufProveReplicaUpdatesParams = []byte{129} - -func (t *ProveReplicaUpdatesParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufProveReplicaUpdatesParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Updates ([]miner.ReplicaUpdate) (slice) - if len(t.Updates) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Updates was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Updates))); err != nil { - return err - } - for _, v := range t.Updates { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ProveReplicaUpdatesParams) UnmarshalCBOR(r io.Reader) error { - *t = ProveReplicaUpdatesParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Updates ([]miner.ReplicaUpdate) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Updates: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Updates = make([]ReplicaUpdate, extra) - } - - for i := 0; i < int(extra); i++ { - - var v ReplicaUpdate - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Updates[i] = v - } - - return nil -} - -var lengthBufProveReplicaUpdatesParams2 = []byte{129} - -func (t *ProveReplicaUpdatesParams2) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufProveReplicaUpdatesParams2); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Updates ([]miner.ReplicaUpdate2) (slice) - if len(t.Updates) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Updates was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Updates))); err != nil { - return err - } - for _, v := range t.Updates { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *ProveReplicaUpdatesParams2) UnmarshalCBOR(r io.Reader) error { - *t = ProveReplicaUpdatesParams2{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Updates ([]miner.ReplicaUpdate2) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if 
extra > cbg.MaxLength { - return fmt.Errorf("t.Updates: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Updates = make([]ReplicaUpdate2, extra) - } - - for i := 0; i < int(extra); i++ { - - var v ReplicaUpdate2 - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Updates[i] = v - } - - return nil -} - -var lengthBufFaultDeclaration = []byte{131} - -func (t *FaultDeclaration) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufFaultDeclaration); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *FaultDeclaration) UnmarshalCBOR(r io.Reader) error { - *t = FaultDeclaration{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - return nil -} - -var lengthBufRecoveryDeclaration = []byte{131} - -func (t *RecoveryDeclaration) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufRecoveryDeclaration); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *RecoveryDeclaration) UnmarshalCBOR(r io.Reader) error { - *t = RecoveryDeclaration{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - 
return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - return nil -} - -var lengthBufExpirationExtension = []byte{132} - -func (t *ExpirationExtension) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufExpirationExtension); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - - // t.NewExpiration (abi.ChainEpoch) (int64) - if t.NewExpiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NewExpiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.NewExpiration-1)); err != nil { - return err - } - } - return nil -} - -func (t *ExpirationExtension) UnmarshalCBOR(r io.Reader) error { - *t = ExpirationExtension{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - // t.NewExpiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.NewExpiration = abi.ChainEpoch(extraI) - } - return nil -} - -var lengthBufTerminationDeclaration = []byte{131} - -func (t *TerminationDeclaration) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return 
err - } - if _, err := w.Write(lengthBufTerminationDeclaration); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.Sectors (bitfield.BitField) (struct) - if err := t.Sectors.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *TerminationDeclaration) UnmarshalCBOR(r io.Reader) error { - *t = TerminationDeclaration{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.Sectors (bitfield.BitField) (struct) - - { - - if err := t.Sectors.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Sectors: %w", err) - } - - } - return nil -} - -var lengthBufPoStPartition = []byte{130} - -func (t *PoStPartition) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPoStPartition); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Index (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Index)); err != nil { - return err - } - - // t.Skipped (bitfield.BitField) (struct) - if err := t.Skipped.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *PoStPartition) UnmarshalCBOR(r io.Reader) error { - *t = PoStPartition{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Index (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Index = uint64(extra) - - } - // t.Skipped (bitfield.BitField) (struct) - - { - - if err := t.Skipped.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Skipped: %w", err) - } - - } - return nil -} - -var lengthBufReplicaUpdate = []byte{135} - -func (t *ReplicaUpdate) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufReplicaUpdate); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorID (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != 
nil { - return err - } - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.NewSealedSectorCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.NewSealedSectorCID); err != nil { - return xerrors.Errorf("failed to write cid field t.NewSealedSectorCID: %w", err) - } - - // t.Deals ([]abi.DealID) (slice) - if len(t.Deals) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Deals was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Deals))); err != nil { - return err - } - for _, v := range t.Deals { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.UpdateProofType (abi.RegisteredUpdateProof) (int64) - if t.UpdateProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.UpdateProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.UpdateProofType-1)); err != nil { - return err - } - } - - // t.ReplicaProof ([]uint8) (slice) - if len(t.ReplicaProof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.ReplicaProof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ReplicaProof))); err != nil { - return err - } - - if _, err := w.Write(t.ReplicaProof[:]); err != nil { - return err - } - return nil -} - -func (t *ReplicaUpdate) UnmarshalCBOR(r io.Reader) error { - *t = ReplicaUpdate{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorID (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = abi.SectorNumber(extra) - - } - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.NewSealedSectorCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.NewSealedSectorCID: %w", err) - } - - t.NewSealedSectorCID = c - - } - // t.Deals ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Deals: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Deals = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := 
cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.Deals slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.Deals was not a uint, instead got %d", maj) - } - - t.Deals[i] = abi.DealID(val) - } - - // t.UpdateProofType (abi.RegisteredUpdateProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.UpdateProofType = abi.RegisteredUpdateProof(extraI) - } - // t.ReplicaProof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.ReplicaProof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.ReplicaProof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.ReplicaProof[:]); err != nil { - return err - } - return nil -} - -var lengthBufReplicaUpdate2 = []byte{136} - -func (t *ReplicaUpdate2) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufReplicaUpdate2); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorID (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { - return err - } - - // t.Deadline (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { - return err - } - - // t.Partition (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { - return err - } - - // t.NewSealedSectorCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.NewSealedSectorCID); err != nil { - return xerrors.Errorf("failed to write cid field t.NewSealedSectorCID: %w", err) - } - - // t.NewUnsealedSectorCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.NewUnsealedSectorCID); err != nil { - return xerrors.Errorf("failed to write cid field t.NewUnsealedSectorCID: %w", err) - } - - // t.Deals ([]abi.DealID) (slice) - if len(t.Deals) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Deals was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Deals))); err != nil { - return err - } - for _, v := range t.Deals { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.UpdateProofType (abi.RegisteredUpdateProof) (int64) - if t.UpdateProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.UpdateProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.UpdateProofType-1)); err != nil { - return err - } - } - - // t.ReplicaProof ([]uint8) (slice) - if len(t.ReplicaProof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.ReplicaProof was too long") - } - - if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ReplicaProof))); err != nil { - return err - } - - if _, err := w.Write(t.ReplicaProof[:]); err != nil { - return err - } - return nil -} - -func (t *ReplicaUpdate2) UnmarshalCBOR(r io.Reader) error { - *t = ReplicaUpdate2{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 8 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorID (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = abi.SectorNumber(extra) - - } - // t.Deadline (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Deadline = uint64(extra) - - } - // t.Partition (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Partition = uint64(extra) - - } - // t.NewSealedSectorCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.NewSealedSectorCID: %w", err) - } - - t.NewSealedSectorCID = c - - } - // t.NewUnsealedSectorCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.NewUnsealedSectorCID: %w", err) - } - - t.NewUnsealedSectorCID = c - - } - // t.Deals ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Deals: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Deals = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.Deals slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.Deals was not a uint, instead got %d", maj) - } - - t.Deals[i] = abi.DealID(val) - } - - // t.UpdateProofType (abi.RegisteredUpdateProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.UpdateProofType = abi.RegisteredUpdateProof(extraI) - } - // t.ReplicaProof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.ReplicaProof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.ReplicaProof = make([]uint8, extra) - } - - if 
_, err := io.ReadFull(br, t.ReplicaProof[:]); err != nil { - return err - } - return nil -} - -var lengthBufGetBeneficiaryReturn = []byte{130} - -func (t *GetBeneficiaryReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufGetBeneficiaryReturn); err != nil { - return err - } - - // t.Active (miner.ActiveBeneficiary) (struct) - if err := t.Active.MarshalCBOR(w); err != nil { - return err - } - - // t.Proposed (miner.PendingBeneficiaryChange) (struct) - if err := t.Proposed.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *GetBeneficiaryReturn) UnmarshalCBOR(r io.Reader) error { - *t = GetBeneficiaryReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Active (miner.ActiveBeneficiary) (struct) - - { - - if err := t.Active.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Active: %w", err) - } - - } - // t.Proposed (miner.PendingBeneficiaryChange) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.Proposed = new(PendingBeneficiaryChange) - if err := t.Proposed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Proposed pointer: %w", err) - } - } - - } - return nil -} - -var lengthBufChangeBeneficiaryParams = []byte{131} - -func (t *ChangeBeneficiaryParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufChangeBeneficiaryParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.NewBeneficiary (address.Address) (struct) - if err := t.NewBeneficiary.MarshalCBOR(w); err != nil { - return err - } - - // t.NewQuota (big.Int) (struct) - if err := t.NewQuota.MarshalCBOR(w); err != nil { - return err - } - - // t.NewExpiration (abi.ChainEpoch) (int64) - if t.NewExpiration >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NewExpiration)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.NewExpiration-1)); err != nil { - return err - } - } - return nil -} - -func (t *ChangeBeneficiaryParams) UnmarshalCBOR(r io.Reader) error { - *t = ChangeBeneficiaryParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NewBeneficiary (address.Address) (struct) - - { - - if err := t.NewBeneficiary.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewBeneficiary: %w", err) - } - - } - // t.NewQuota (big.Int) (struct) - - { - - if err := t.NewQuota.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.NewQuota: %w", err) - } - - } - // t.NewExpiration (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = 
int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.NewExpiration = abi.ChainEpoch(extraI) - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadline_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadline_state.go deleted file mode 100644 index b189817..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadline_state.go +++ /dev/null @@ -1,192 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" - xc "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-state-types/proof" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -// Deadlines contains Deadline objects, describing the sectors due at the given -// deadline and their state (faulty, terminated, recovering, etc.). -type Deadlines struct { - // Note: we could inline part of the deadline struct (e.g., active/assigned sectors) - // to make new sector assignment cheaper. At the moment, assigning a sector requires - // loading all deadlines to figure out where best to assign new sectors. - Due [WPoStPeriodDeadlines]cid.Cid // []Deadline -} - -// Deadline holds the state for all sectors due at a specific deadline. -type Deadline struct { - // Partitions in this deadline, in order. - // The keys of this AMT are always sequential integers beginning with zero. - Partitions cid.Cid // AMT[PartitionNumber]Partition - - // Maps epochs to partitions that _may_ have sectors that expire in or - // before that epoch, either on-time or early as faults. - // Keys are quantized to final epochs in each proving deadline. - // - // NOTE: Partitions MUST NOT be removed from this queue (until the - // associated epoch has passed) even if they no longer have sectors - // expiring at that epoch. Sectors expiring at this epoch may later be - // recovered, and this queue will not be updated at that time. - ExpirationsEpochs cid.Cid // AMT[ChainEpoch]BitField - - // Partitions that have been proved by window PoSts so far during the - // current challenge window. - // NOTE: This bitfield includes both partitions whose proofs - // were optimistically accepted and stored in - // OptimisticPoStSubmissions, and those whose proofs were - // verified on-chain. - PartitionsPoSted bitfield.BitField - - // Partitions with sectors that terminated early. - EarlyTerminations bitfield.BitField - - // The number of non-terminated sectors in this deadline (incl faulty). - LiveSectors uint64 - - // The total number of sectors in this deadline (incl dead). - TotalSectors uint64 - - // Memoized sum of faulty power in partitions. - FaultyPower PowerPair - - // AMT of optimistically accepted WindowPoSt proofs, submitted during - // the current challenge window. At the end of the challenge window, - // this AMT will be moved to OptimisticPoStSubmissionsSnapshot. WindowPoSt proofs - // verified on-chain do not appear in this AMT. - OptimisticPoStSubmissions cid.Cid // AMT[]WindowedPoSt - - // Snapshot of the miner's sectors AMT at the end of the previous challenge - // window for this deadline. 
- SectorsSnapshot cid.Cid - - // Snapshot of partition state at the end of the previous challenge - // window for this deadline. - PartitionsSnapshot cid.Cid - - // Snapshot of the proofs submitted by the end of the previous challenge - // window for this deadline. - // - // These proofs may be disputed via DisputeWindowedPoSt. Successfully - // disputed window PoSts are removed from the snapshot. - OptimisticPoStSubmissionsSnapshot cid.Cid -} - -type WindowedPoSt struct { - // Partitions proved by this WindowedPoSt. - Partitions bitfield.BitField - // Array of proofs, one per distinct registered proof type present in - // the sectors being proven. In the usual case of a single proof type, - // this array will always have a single element (independent of number - // of partitions). - Proofs []proof.PoStProof -} - -// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. -const DeadlinePartitionsAmtBitwidth = 3 // Usually a small array -const DeadlineExpirationAmtBitwidth = 5 - -// Given that 4 partitions can be proven in one post, this AMT's height will -// only exceed the partition AMT's height at ~0.75EiB of storage. -const DeadlineOptimisticPoStSubmissionsAmtBitwidth = 2 - -// -// Deadlines (plural) -// - -func (d *Deadlines) LoadDeadline(store adt.Store, dlIdx uint64) (*Deadline, error) { - if dlIdx >= uint64(len(d.Due)) { - return nil, xc.ErrIllegalArgument.Wrapf("invalid deadline %d", dlIdx) - } - deadline := new(Deadline) - err := store.Get(store.Context(), d.Due[dlIdx], deadline) - if err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to lookup deadline %d: %w", dlIdx, err) - } - return deadline, nil -} - -func (d *Deadlines) ForEach(store adt.Store, cb func(dlIdx uint64, dl *Deadline) error) error { - for dlIdx := range d.Due { - dl, err := d.LoadDeadline(store, uint64(dlIdx)) - if err != nil { - return err - } - err = cb(uint64(dlIdx), dl) - if err != nil { - return err - } - } - return nil -} - -func (d *Deadlines) UpdateDeadline(store adt.Store, dlIdx uint64, deadline *Deadline) error { - if dlIdx >= uint64(len(d.Due)) { - return xerrors.Errorf("invalid deadline %d", dlIdx) - } - - if err := deadline.ValidateState(); err != nil { - return err - } - - dlCid, err := store.Put(store.Context(), deadline) - if err != nil { - return err - } - d.Due[dlIdx] = dlCid - - return nil -} - -// -// Deadline (singular) -// - -func (d *Deadline) PartitionsArray(store adt.Store) (*adt.Array, error) { - arr, err := adt.AsArray(store, d.Partitions, DeadlinePartitionsAmtBitwidth) - if err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to load partitions: %w", err) - } - return arr, nil -} - -func (d *Deadline) OptimisticProofsSnapshotArray(store adt.Store) (*adt.Array, error) { - arr, err := adt.AsArray(store, d.OptimisticPoStSubmissionsSnapshot, DeadlineOptimisticPoStSubmissionsAmtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to load proofs snapshot: %w", err) - } - return arr, nil -} - -func (d *Deadline) LoadPartition(store adt.Store, partIdx uint64) (*Partition, error) { - partitions, err := d.PartitionsArray(store) - if err != nil { - return nil, err - } - var partition Partition - found, err := partitions.Get(partIdx, &partition) - if err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to lookup partition %d: %w", partIdx, err) - } - if !found { - return nil, xc.ErrNotFound.Wrapf("no partition %d", partIdx) - } - return &partition, nil -} - -func (d *Deadline) ValidateState() error { - if d.LiveSectors > 
d.TotalSectors { - return xerrors.Errorf("Deadline left with more live sectors than total: %v", d) - } - - if d.FaultyPower.Raw.LessThan(big.Zero()) || d.FaultyPower.QA.LessThan(big.Zero()) { - return xerrors.Errorf("Deadline left with negative faulty power: %v", d) - } - - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadlines.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadlines.go deleted file mode 100644 index dbab28c..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/deadlines.go +++ /dev/null @@ -1,58 +0,0 @@ -package miner - -import ( - "errors" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" - "github.com/filecoin-project/go-state-types/dline" - "golang.org/x/xerrors" -) - -// Returns deadline-related calculations for a deadline in some proving period and the current epoch. -func NewDeadlineInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch) *dline.Info { - return dline.NewInfo(periodStart, deadlineIdx, currEpoch, WPoStPeriodDeadlines, WPoStProvingPeriod, WPoStChallengeWindow, WPoStChallengeLookback, FaultDeclarationCutoff) -} - -func QuantSpecForDeadline(di *dline.Info) builtin.QuantSpec { - return builtin.NewQuantSpec(WPoStProvingPeriod, di.Last()) -} - -// FindSector returns the deadline and partition index for a sector number. -// It returns an error if the sector number is not tracked by deadlines. -func FindSector(store adt.Store, deadlines *Deadlines, sectorNum abi.SectorNumber) (uint64, uint64, error) { - for dlIdx := range deadlines.Due { - dl, err := deadlines.LoadDeadline(store, uint64(dlIdx)) - if err != nil { - return 0, 0, err - } - - partitions, err := adt.AsArray(store, dl.Partitions, DeadlinePartitionsAmtBitwidth) - if err != nil { - return 0, 0, err - } - var partition Partition - - partIdx := uint64(0) - stopErr := errors.New("stop") - err = partitions.ForEach(&partition, func(i int64) error { - found, err := partition.Sectors.IsSet(uint64(sectorNum)) - if err != nil { - return err - } - if found { - partIdx = uint64(i) - return stopErr - } - return nil - }) - if err == stopErr { - return uint64(dlIdx), partIdx, nil - } else if err != nil { - return 0, 0, err - } - - } - return 0, 0, xerrors.Errorf("sector %d not due at any deadline", sectorNum) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/methods.go deleted file mode 100644 index 8c83428..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/methods.go +++ /dev/null @@ -1,42 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/power" -) - -var Methods = []interface{}{ - 1: *new(func(interface{}, *power.MinerConstructorParams) *abi.EmptyValue), // Constructor - 2: *new(func(interface{}, *abi.EmptyValue) *GetControlAddressesReturn), // ControlAddresses - 3: *new(func(interface{}, *ChangeWorkerAddressParams) *abi.EmptyValue), // ChangeWorkerAddress - 4: *new(func(interface{}, *ChangePeerIDParams) *abi.EmptyValue), // ChangePeerID - 5: *new(func(interface{}, *SubmitWindowedPoStParams) *abi.EmptyValue), // SubmitWindowedPoSt - 6: 
*new(func(interface{}, *PreCommitSectorParams) *abi.EmptyValue), // PreCommitSector - 7: *new(func(interface{}, *ProveCommitSectorParams) *abi.EmptyValue), // ProveCommitSector - 8: *new(func(interface{}, *ExtendSectorExpirationParams) *abi.EmptyValue), // ExtendSectorExpiration - 9: *new(func(interface{}, *TerminateSectorsParams) *TerminateSectorsReturn), // TerminateSectors - 10: *new(func(interface{}, *DeclareFaultsParams) *abi.EmptyValue), // DeclareFaults - 11: *new(func(interface{}, *DeclareFaultsRecoveredParams) *abi.EmptyValue), // DeclareFaultsRecovered - 12: *new(func(interface{}, *DeferredCronEventParams) *abi.EmptyValue), // OnDeferredCronEvent - 13: *new(func(interface{}, *CheckSectorProvenParams) *abi.EmptyValue), // CheckSectorProven - 14: *new(func(interface{}, *ApplyRewardParams) *abi.EmptyValue), // ApplyRewards - 15: *new(func(interface{}, *ReportConsensusFaultParams) *abi.EmptyValue), // ReportConsensusFault - 16: *new(func(interface{}, *WithdrawBalanceParams) *abi.TokenAmount), // WithdrawBalance - 17: *new(func(interface{}, *ConfirmSectorProofsParams) *abi.EmptyValue), // ConfirmSectorProofsValid - 18: *new(func(interface{}, *ChangeMultiaddrsParams) *abi.EmptyValue), // ChangeMultiaddrs - 19: *new(func(interface{}, *CompactPartitionsParams) *abi.EmptyValue), // CompactPartitions - 20: *new(func(interface{}, *CompactSectorNumbersParams) *abi.EmptyValue), // CompactSectorNumbers - 21: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // ConfirmUpdateWorkerKey - 22: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // RepayDebt - 23: *new(func(interface{}, *address.Address) *abi.EmptyValue), // ChangeOwnerAddress - 24: *new(func(interface{}, *DisputeWindowedPoStParams) *abi.EmptyValue), // DisputeWindowedPoSt - 25: *new(func(interface{}, *PreCommitSectorBatchParams) *abi.EmptyValue), // PreCommitSectorBatch - 26: *new(func(interface{}, *ProveCommitAggregateParams) *abi.EmptyValue), // ProveCommitAggregate - 27: *new(func(interface{}, *ProveReplicaUpdatesParams) *bitfield.BitField), // ProveReplicaUpdates - 28: *new(func(interface{}, *PreCommitSectorBatchParams2) *abi.EmptyValue), // PreCommitSectorBatch2 - 29: *new(func(interface{}, *ProveReplicaUpdatesParams2) *bitfield.BitField), // ProveReplicaUpdates2 - 30: *new(func(interface{}, *ChangeBeneficiaryParams) *abi.EmptyValue), // ChangeBeneficiary - 31: *new(func(interface{}, *abi.EmptyValue) *GetBeneficiaryReturn), // GetBeneficiary -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_state.go deleted file mode 100644 index a021101..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_state.go +++ /dev/null @@ -1,320 +0,0 @@ -package miner - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" - "github.com/filecoin-project/go-state-types/dline" - xc "github.com/filecoin-project/go-state-types/exitcode" - cid "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -// Balance of Miner Actor should be greater than or equal to -// the sum of PreCommitDeposits and LockedFunds. 
-// It is possible for balance to fall below the sum of -// PCD, LF and InitialPledgeRequirements, and this is a bad -// state (IP Debt) that limits a miner actor's behavior (i.e. no balance withdrawals) -// Excess balance as computed by st.GetAvailableBalance will be -// withdrawable or usable for pre-commit deposit or pledge lock-up. -type State struct { - // Information not related to sectors. - Info cid.Cid - - PreCommitDeposits abi.TokenAmount // Total funds locked as PreCommitDeposits - LockedFunds abi.TokenAmount // Total rewards and added funds locked in vesting table - - VestingFunds cid.Cid // VestingFunds (Vesting Funds schedule for the miner). - - FeeDebt abi.TokenAmount // Absolute value of debt this miner owes from unpaid fees - - InitialPledge abi.TokenAmount // Sum of initial pledge requirements of all active sectors - - // Sectors that have been pre-committed but not yet proven. - PreCommittedSectors cid.Cid // Map, HAMT[SectorNumber]SectorPreCommitOnChainInfo - - // PreCommittedSectorsCleanUp maintains the state required to cleanup expired PreCommittedSectors. - PreCommittedSectorsCleanUp cid.Cid // BitFieldQueue (AMT[Epoch]*BitField) - - // Allocated sector IDs. Sector IDs can never be reused once allocated. - AllocatedSectors cid.Cid // BitField - - // Information for all proven and not-yet-garbage-collected sectors. - // - // Sectors are removed from this AMT when the partition to which the - // sector belongs is compacted. - Sectors cid.Cid // Array, AMT[SectorNumber]SectorOnChainInfo (sparse) - - // DEPRECATED. This field will change names and no longer be updated every proving period in a future upgrade - // The first epoch in this miner's current proving period. This is the first epoch in which a PoSt for a - // partition at the miner's first deadline may arrive. Alternatively, it is after the last epoch at which - // a PoSt for the previous window is valid. - // Always greater than zero, this may be greater than the current epoch for genesis miners in the first - // WPoStProvingPeriod epochs of the chain; the epochs before the first proving period starts are exempt from Window - // PoSt requirements. - // Updated at the end of every period by a cron callback. - ProvingPeriodStart abi.ChainEpoch - - // DEPRECATED. This field will be removed from state in a future upgrade. - // Index of the deadline within the proving period beginning at ProvingPeriodStart that has not yet been - // finalized. - // Updated at the end of each deadline window by a cron callback. - CurrentDeadline uint64 - - // The sector numbers due for PoSt at each deadline in the current proving period, frozen at period start. - // New sectors are added and expired ones removed at proving period boundary. - // Faults are not subtracted from this in state, but on the fly. - Deadlines cid.Cid - - // Deadlines with outstanding fees for early sector termination. - EarlyTerminations bitfield.BitField - - // True when miner cron is active, false otherwise - DeadlineCronActive bool -} - -// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. -const PrecommitCleanUpAmtBitwidth = 6 -const SectorsAmtBitwidth = 5 - -type MinerInfo struct { - // Account that owns this miner. - // - Income and returned collateral are paid to this address. - // - This address is also allowed to change the worker address for the miner. - Owner addr.Address // Must be an ID-address. - - // Worker account for this miner. 
- // The associated pubkey-type address is used to sign blocks and messages on behalf of this miner. - Worker addr.Address // Must be an ID-address. - - // Additional addresses that are permitted to submit messages controlling this actor (optional). - ControlAddresses []addr.Address // Must all be ID addresses. - - PendingWorkerKey *WorkerKeyChange - - // Byte array representing a Libp2p identity that should be used when connecting to this miner. - PeerId abi.PeerID - - // Slice of byte arrays representing Libp2p multi-addresses used for establishing a connection with this miner. - Multiaddrs []abi.Multiaddrs - - // The proof type used for Window PoSt for this miner. - // A miner may commit sectors with different seal proof types (but compatible sector size and - // corresponding PoSt proof types). - WindowPoStProofType abi.RegisteredPoStProof - - // Amount of space in each sector committed by this miner. - // This is computed from the proof type and represented here redundantly. - SectorSize abi.SectorSize - - // The number of sectors in each Window PoSt partition (proof). - // This is computed from the proof type and represented here redundantly. - WindowPoStPartitionSectors uint64 - - // The next epoch this miner is eligible for certain permissioned actor methods - // and winning block elections as a result of being reported for a consensus fault. - ConsensusFaultElapsed abi.ChainEpoch - - // A proposed new owner account for this miner. - // Must be confirmed by a message from the pending address itself. - PendingOwnerAddress *addr.Address - - // Beneficiary address for this miner. - // This is the address that tokens will be withdrawn to - Beneficiary addr.Address - - // Beneficiary's withdrawal quota, how much of the quota has been withdrawn, - // and when the Beneficiary expires. - BeneficiaryTerm BeneficiaryTerm - - // A proposed change to `BenificiaryTerm` - PendingBeneficiaryTerm *PendingBeneficiaryChange -} - -type WorkerKeyChange struct { - NewWorker addr.Address // Must be an ID address - EffectiveAt abi.ChainEpoch -} - -// Information provided by a miner when pre-committing a sector. -type SectorPreCommitInfo struct { - SealProof abi.RegisteredSealProof - SectorNumber abi.SectorNumber - SealedCID cid.Cid `checked:"true"` // CommR - SealRandEpoch abi.ChainEpoch - DealIDs []abi.DealID - Expiration abi.ChainEpoch - UnsealedCid *cid.Cid -} - -// Information stored on-chain for a pre-committed sector. -type SectorPreCommitOnChainInfo struct { - Info SectorPreCommitInfo - PreCommitDeposit abi.TokenAmount - PreCommitEpoch abi.ChainEpoch -} - -// Information stored on-chain for a proven sector. 
-type SectorOnChainInfo struct { - SectorNumber abi.SectorNumber - SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s - SealedCID cid.Cid // CommR - DealIDs []abi.DealID - Activation abi.ChainEpoch // Epoch during which the sector proof was accepted - Expiration abi.ChainEpoch // Epoch during which the sector expires - DealWeight abi.DealWeight // Integral of active deals over sector lifetime - VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime - InitialPledge abi.TokenAmount // Pledge collected to commit this sector - ExpectedDayReward abi.TokenAmount // Expected one day projection of reward for sector computed at activation time - ExpectedStoragePledge abi.TokenAmount // Expected twenty day projection of reward for sector computed at activation time - ReplacedSectorAge abi.ChainEpoch // Age of sector this sector replaced or zero - ReplacedDayReward abi.TokenAmount // Day reward of sector this sector replace or zero - SectorKeyCID *cid.Cid // The original SealedSectorCID, only gets set on the first ReplicaUpdate -} - -func (st *State) GetInfo(store adt.Store) (*MinerInfo, error) { - var info MinerInfo - if err := store.Get(store.Context(), st.Info, &info); err != nil { - return nil, xerrors.Errorf("failed to get miner info %w", err) - } - return &info, nil -} - -type BeneficiaryTerm struct { - Quota abi.TokenAmount - UsedQuota abi.TokenAmount - Expiration abi.ChainEpoch -} - -type PendingBeneficiaryChange struct { - NewBeneficiary addr.Address - NewQuota abi.TokenAmount - NewExpiration abi.ChainEpoch - ApprovedByBeneficiary bool - ApprovedByNominee bool -} - -// Returns deadline calculations for the state recorded proving period and deadline. This is out of date if the a -// miner does not have an active miner cron -func (st *State) RecordedDeadlineInfo(currEpoch abi.ChainEpoch) *dline.Info { - return NewDeadlineInfo(st.ProvingPeriodStart, st.CurrentDeadline, currEpoch) -} - -// Returns deadline calculations for the current (according to state) proving period -func (st *State) QuantSpecForDeadline(dlIdx uint64) builtin.QuantSpec { - return QuantSpecForDeadline(NewDeadlineInfo(st.ProvingPeriodStart, dlIdx, 0)) -} - -func (st *State) GetPrecommittedSector(store adt.Store, sectorNo abi.SectorNumber) (*SectorPreCommitOnChainInfo, bool, error) { - precommitted, err := adt.AsMap(store, st.PreCommittedSectors, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, false, err - } - - var info SectorPreCommitOnChainInfo - found, err := precommitted.Get(SectorKey(sectorNo), &info) - if err != nil { - return nil, false, xerrors.Errorf("failed to load precommitment for %v: %w", sectorNo, err) - } - return &info, found, nil -} - -func (st *State) GetSector(store adt.Store, sectorNo abi.SectorNumber) (*SectorOnChainInfo, bool, error) { - sectors, err := LoadSectors(store, st.Sectors) - if err != nil { - return nil, false, err - } - - return sectors.Get(sectorNo) -} - -func (st *State) FindSector(store adt.Store, sno abi.SectorNumber) (uint64, uint64, error) { - deadlines, err := st.LoadDeadlines(store) - if err != nil { - return 0, 0, err - } - return FindSector(store, deadlines, sno) -} - -func (st *State) LoadDeadlines(store adt.Store) (*Deadlines, error) { - var deadlines Deadlines - if err := store.Get(store.Context(), st.Deadlines, &deadlines); err != nil { - return nil, xc.ErrIllegalState.Wrapf("failed to load deadlines (%s): %w", st.Deadlines, err) - } - - return &deadlines, nil -} - -func (st *State) 
SaveDeadlines(store adt.Store, deadlines *Deadlines) error { - c, err := store.Put(store.Context(), deadlines) - if err != nil { - return err - } - st.Deadlines = c - return nil -} - -// LoadVestingFunds loads the vesting funds table from the store -func (st *State) LoadVestingFunds(store adt.Store) (*VestingFunds, error) { - var funds VestingFunds - if err := store.Get(store.Context(), st.VestingFunds, &funds); err != nil { - return nil, xerrors.Errorf("failed to load vesting funds (%s): %w", st.VestingFunds, err) - } - - return &funds, nil -} - -// CheckVestedFunds returns the amount of vested funds that have vested before the provided epoch. -func (st *State) CheckVestedFunds(store adt.Store, currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { - vestingFunds, err := st.LoadVestingFunds(store) - if err != nil { - return big.Zero(), xerrors.Errorf("failed to load vesting funds: %w", err) - } - - amountVested := abi.NewTokenAmount(0) - - for i := range vestingFunds.Funds { - vf := vestingFunds.Funds[i] - epoch := vf.Epoch - amount := vf.Amount - - if epoch >= currEpoch { - break - } - - amountVested = big.Add(amountVested, amount) - } - - return amountVested, nil -} - -// Unclaimed funds that are not locked -- includes free funds and does not -// account for fee debt. Always greater than or equal to zero -func (st *State) GetUnlockedBalance(actorBalance abi.TokenAmount) (abi.TokenAmount, error) { - unlockedBalance := big.Subtract(actorBalance, st.LockedFunds, st.PreCommitDeposits, st.InitialPledge) - if unlockedBalance.LessThan(big.Zero()) { - return big.Zero(), xerrors.Errorf("negative unlocked balance %v", unlockedBalance) - } - return unlockedBalance, nil -} - -// Unclaimed funds. Actor balance - (locked funds, precommit deposit, initial pledge, fee debt) -// Can go negative if the miner is in IP debt -func (st *State) GetAvailableBalance(actorBalance abi.TokenAmount) (abi.TokenAmount, error) { - unlockedBalance, err := st.GetUnlockedBalance(actorBalance) - if err != nil { - return big.Zero(), err - } - return big.Subtract(unlockedBalance, st.FeeDebt), nil -} - -// -// Misc helpers -// - -func SectorKey(e abi.SectorNumber) abi.Keyer { - return abi.UIntKey(uint64(e)) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_types.go deleted file mode 100644 index 5bec61a..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/miner_types.go +++ /dev/null @@ -1,352 +0,0 @@ -package miner - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/power" - "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" - "github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing" - xc "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-state-types/proof" - cid "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -type DeclareFaultsRecoveredParams struct { - Recoveries []RecoveryDeclaration -} - -type RecoveryDeclaration struct { - // The deadline to which the recovered sectors are assigned, in range [0..WPoStPeriodDeadlines) - Deadline uint64 - // Partition index within the deadline containing the recovered sectors. - Partition uint64 - // Sectors in the partition being declared recovered. 
- Sectors bitfield.BitField -} - -type DeclareFaultsParams struct { - Faults []FaultDeclaration -} - -type FaultDeclaration struct { - // The deadline to which the faulty sectors are assigned, in range [0..WPoStPeriodDeadlines) - Deadline uint64 - // Partition index within the deadline containing the faulty sectors. - Partition uint64 - // Sectors in the partition being declared faulty. - Sectors bitfield.BitField -} - -type ReplicaUpdate struct { - SectorID abi.SectorNumber - Deadline uint64 - Partition uint64 - NewSealedSectorCID cid.Cid `checked:"true"` - Deals []abi.DealID - UpdateProofType abi.RegisteredUpdateProof - ReplicaProof []byte -} - -type ProveReplicaUpdatesParams struct { - Updates []ReplicaUpdate -} - -type ReplicaUpdate2 struct { - SectorID abi.SectorNumber - Deadline uint64 - Partition uint64 - NewSealedSectorCID cid.Cid `checked:"true"` - NewUnsealedSectorCID cid.Cid `checked:"true"` - Deals []abi.DealID - UpdateProofType abi.RegisteredUpdateProof - ReplicaProof []byte -} - -type ProveReplicaUpdatesParams2 struct { - Updates []ReplicaUpdate2 -} - -type PoStPartition struct { - // Partitions are numbered per-deadline, from zero. - Index uint64 - // Sectors skipped while proving that weren't already declared faulty - Skipped bitfield.BitField -} - -// Information submitted by a miner to provide a Window PoSt. -type SubmitWindowedPoStParams struct { - // The deadline index which the submission targets. - Deadline uint64 - // The partitions being proven. - Partitions []PoStPartition - // Array of proofs, one per distinct registered proof type present in the sectors being proven. - // In the usual case of a single proof type, this array will always have a single element (independent of number of partitions). - Proofs []proof.PoStProof - // The epoch at which these proofs is being committed to a particular chain. - ChainCommitEpoch abi.ChainEpoch - // The ticket randomness on the chain at the ChainCommitEpoch on the chain this post is committed to - ChainCommitRand abi.Randomness -} - -type DisputeWindowedPoStParams struct { - Deadline uint64 - PoStIndex uint64 // only one is allowed at a time to avoid loading too many sector infos. -} - -type ProveCommitAggregateParams struct { - SectorNumbers bitfield.BitField - AggregateProof []byte -} - -type ProveCommitSectorParams struct { - SectorNumber abi.SectorNumber - Proof []byte -} - -type MinerConstructorParams = power.MinerConstructorParams - -type TerminateSectorsParams struct { - Terminations []TerminationDeclaration -} - -type TerminationDeclaration struct { - Deadline uint64 - Partition uint64 - Sectors bitfield.BitField -} - -type TerminateSectorsReturn struct { - // Set to true if all early termination work has been completed. When - // false, the miner may choose to repeatedly invoke TerminateSectors - // with no new sectors to process the remainder of the pending - // terminations. While pending terminations are outstanding, the miner - // will not be able to withdraw funds. 
- Done bool -} - -type ChangePeerIDParams struct { - NewID abi.PeerID -} - -type ChangeMultiaddrsParams struct { - NewMultiaddrs []abi.Multiaddrs -} - -type ChangeWorkerAddressParams struct { - NewWorker addr.Address - NewControlAddrs []addr.Address -} - -type ExtendSectorExpirationParams struct { - Extensions []ExpirationExtension -} - -type ExpirationExtension struct { - Deadline uint64 - Partition uint64 - Sectors bitfield.BitField - NewExpiration abi.ChainEpoch -} - -type ReportConsensusFaultParams struct { - BlockHeader1 []byte - BlockHeader2 []byte - BlockHeaderExtra []byte -} - -type GetControlAddressesReturn struct { - Owner addr.Address - Worker addr.Address - ControlAddrs []addr.Address -} - -type CheckSectorProvenParams struct { - SectorNumber abi.SectorNumber -} - -type WithdrawBalanceParams struct { - AmountRequested abi.TokenAmount -} - -type CompactPartitionsParams struct { - Deadline uint64 - Partitions bitfield.BitField -} - -type CompactSectorNumbersParams struct { - MaskSectorNumbers bitfield.BitField -} - -type CronEventType int64 - -const ( - CronEventWorkerKeyChange CronEventType = iota - CronEventProvingDeadline - CronEventProcessEarlyTerminations -) - -type CronEventPayload struct { - EventType CronEventType -} - -// Identifier for a single partition within a miner. -type PartitionKey struct { - Deadline uint64 - Partition uint64 -} - -type PreCommitSectorParams struct { - SealProof abi.RegisteredSealProof - SectorNumber abi.SectorNumber - SealedCID cid.Cid `checked:"true"` // CommR - SealRandEpoch abi.ChainEpoch - DealIDs []abi.DealID - Expiration abi.ChainEpoch - ReplaceCapacity bool // DEPRECATED: Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs) - // DEPRECATED: The committed capacity sector to replace, and it's deadline/partition location - ReplaceSectorDeadline uint64 - ReplaceSectorPartition uint64 - ReplaceSectorNumber abi.SectorNumber -} - -type PreCommitSectorBatchParams struct { - Sectors []PreCommitSectorParams -} - -type PreCommitSectorBatchParams2 struct { - Sectors []SectorPreCommitInfo -} - -type ChangeBeneficiaryParams struct { - NewBeneficiary addr.Address - NewQuota abi.TokenAmount - NewExpiration abi.ChainEpoch -} - -type ActiveBeneficiary struct { - Beneficiary addr.Address - Term BeneficiaryTerm -} - -type GetBeneficiaryReturn struct { - Active ActiveBeneficiary - Proposed *PendingBeneficiaryChange -} - -// ExpirationSet is a collection of sector numbers that are expiring, either due to -// expected "on-time" expiration at the end of their life, or unexpected "early" termination -// due to being faulty for too long consecutively. -// Note that there is not a direct correspondence between on-time sectors and active power; -// a sector may be faulty but expiring on-time if it faults just prior to expected termination. -// Early sectors are always faulty, and active power always represents on-time sectors. -type ExpirationSet struct { - OnTimeSectors bitfield.BitField // Sectors expiring "on time" at the end of their committed life - EarlySectors bitfield.BitField // Sectors expiring "early" due to being faulty for too long - OnTimePledge abi.TokenAmount // Pledge total for the on-time sectors - ActivePower PowerPair // Power that is currently active (not faulty) - FaultyPower PowerPair // Power that is currently faulty -} - -// A queue of expiration sets by epoch, representing the on-time or early termination epoch for a collection of sectors. -// Wraps an AMT[ChainEpoch]*ExpirationSet. 
-// Keys in the queue are quantized (upwards), modulo some offset, to reduce the cardinality of keys. -type ExpirationQueue struct { - *adt.Array - quant builtin.QuantSpec -} - -// Loads a queue root. -// Epochs provided to subsequent method calls will be quantized upwards to quanta mod offsetSeed before being -// written to/read from queue entries. -func LoadExpirationQueue(store adt.Store, root cid.Cid, quant builtin.QuantSpec, bitwidth int) (ExpirationQueue, error) { - arr, err := adt.AsArray(store, root, bitwidth) - if err != nil { - return ExpirationQueue{}, xerrors.Errorf("failed to load epoch queue %v: %w", root, err) - } - return ExpirationQueue{arr, quant}, nil -} -func LoadSectors(store adt.Store, root cid.Cid) (Sectors, error) { - sectorsArr, err := adt.AsArray(store, root, SectorsAmtBitwidth) - if err != nil { - return Sectors{}, err - } - return Sectors{sectorsArr}, nil -} - -// Sectors is a helper type for accessing/modifying a miner's sectors. It's safe -// to pass this object around as needed. -type Sectors struct { - *adt.Array -} - -func (sa Sectors) Load(sectorNos bitfield.BitField) ([]*SectorOnChainInfo, error) { - var sectorInfos []*SectorOnChainInfo - if err := sectorNos.ForEach(func(i uint64) error { - var sectorOnChain SectorOnChainInfo - found, err := sa.Array.Get(i, §orOnChain) - if err != nil { - return xc.ErrIllegalState.Wrapf("failed to load sector %v: %w", abi.SectorNumber(i), err) - } else if !found { - return xc.ErrNotFound.Wrapf("can't find sector %d", i) - } - sectorInfos = append(sectorInfos, §orOnChain) - return nil - }); err != nil { - // Keep the underlying error code, unless the error was from - // traversing the bitfield. In that case, it's an illegal - // argument error. - return nil, xc.Unwrap(err, xc.ErrIllegalArgument).Wrapf("failed to load sectors: %w", err) - } - return sectorInfos, nil -} - -func (sa Sectors) Get(sectorNumber abi.SectorNumber) (info *SectorOnChainInfo, found bool, err error) { - var res SectorOnChainInfo - if found, err := sa.Array.Get(uint64(sectorNumber), &res); err != nil { - return nil, false, xerrors.Errorf("failed to get sector %d: %w", sectorNumber, err) - } else if !found { - return nil, false, nil - } - return &res, true, nil -} - -// VestingFunds represents the vesting table state for the miner. -// It is a slice of (VestingEpoch, VestingAmount). -// The slice will always be sorted by the VestingEpoch. -type VestingFunds struct { - Funds []VestingFund -} - -// VestingFund represents miner funds that will vest at the given epoch. -type VestingFund struct { - Epoch abi.ChainEpoch - Amount abi.TokenAmount -} - -// ConstructVestingFunds constructs empty VestingFunds state. 
-func ConstructVestingFunds() *VestingFunds { - v := new(VestingFunds) - v.Funds = nil - return v -} - -type DeferredCronEventParams struct { - EventPayload []byte - RewardSmoothed smoothing.FilterEstimate - QualityAdjPowerSmoothed smoothing.FilterEstimate -} - -type ApplyRewardParams struct { - Reward abi.TokenAmount - Penalty abi.TokenAmount -} - -type ConfirmSectorProofsParams struct { - Sectors []abi.SectorNumber - RewardSmoothed smoothing.FilterEstimate - RewardBaselinePower abi.StoragePower - QualityAdjPowerSmoothed smoothing.FilterEstimate -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/monies.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/monies.go deleted file mode 100644 index b2142a8..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/monies.go +++ /dev/null @@ -1,116 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/util/math" - "github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing" -) - -// Projection period of expected sector block reward for deposit required to pre-commit a sector. -// This deposit is lost if the pre-commitment is not timely followed up by a commitment proof. -var PreCommitDepositFactor = 20 // PARAM_SPEC -var PreCommitDepositProjectionPeriod = abi.ChainEpoch(PreCommitDepositFactor) * builtin.EpochsInDay - -// Projection period of expected sector block rewards for storage pledge required to commit a sector. -// This pledge is lost if a sector is terminated before its full committed lifetime. -var InitialPledgeFactor = 20 // PARAM_SPEC -var InitialPledgeProjectionPeriod = abi.ChainEpoch(InitialPledgeFactor) * builtin.EpochsInDay - -// Cap on initial pledge requirement for sectors. -// The target is 1 FIL (10**18 attoFIL) per 32GiB. -// This does not divide evenly, so the result is fractionally smaller. -var InitialPledgeMaxPerByte = big.Div(big.NewInt(1e18), big.NewInt(32<<30)) - -// Multiplier of share of circulating money supply for consensus pledge required to commit a sector. -// This pledge is lost if a sector is terminated before its full committed lifetime. -var InitialPledgeLockTarget = builtin.BigFrac{ - Numerator: big.NewInt(3), // PARAM_SPEC - Denominator: big.NewInt(10), -} - -// The projected block reward a sector would earn over some period. -// Also known as "BR(t)". 
-// BR(t) = ProjectedRewardFraction(t) * SectorQualityAdjustedPower -// ProjectedRewardFraction(t) is the sum of estimated reward over estimated total power -// over all epochs in the projection period [t t+projectionDuration] -func ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { - networkQAPowerSmoothed := smoothing.Estimate(&networkQAPowerEstimate) - if networkQAPowerSmoothed.IsZero() { - return smoothing.Estimate(&rewardEstimate) - } - expectedRewardForProvingPeriod := smoothing.ExtrapolatedCumSumOfRatio(projectionDuration, 0, rewardEstimate, networkQAPowerEstimate) - br128 := big.Mul(qaSectorPower, expectedRewardForProvingPeriod) // Q.0 * Q.128 => Q.128 - br := big.Rsh(br128, math.Precision128) - - return big.Max(br, big.Zero()) -} - -// BR but zero values are clamped at 1 attofil -// Some uses of BR (PCD, IP) require a strictly positive value for BR derived values so -// accounting variables can be used as succinct indicators of miner activity. -func ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { - br := ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, projectionDuration) - if br.LessThanEqual(big.Zero()) { - br = abi.NewTokenAmount(1) - } - return br -} - -// Computes the PreCommit deposit given sector qa weight and current network conditions. -// PreCommit Deposit = BR(PreCommitDepositProjectionPeriod) -func PreCommitDepositForPower(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { - return ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate, qaSectorPower, PreCommitDepositProjectionPeriod) -} - -// Computes the pledge requirement for committing new quality-adjusted power to the network, given the current -// network total and baseline power, per-epoch reward, and circulating token supply. 
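
// A minimal usage sketch of the deposit calculation above. The estimate values
// are made up for illustration, and smoothing.NewEstimate is the constructor
// used elsewhere in this module; everything referenced is already imported by
// this file (examplePreCommitDeposit itself is illustrative, not part of it):

func examplePreCommitDeposit() abi.TokenAmount {
	rewardEstimate := smoothing.NewEstimate(big.NewInt(5_000_000_000), big.Zero()) // attoFIL per epoch, illustrative
	powerEstimate := smoothing.NewEstimate(big.NewInt(1<<50), big.Zero())          // network QA power in bytes, illustrative
	qaPower := abi.NewStoragePower(32 << 30)                                       // one 32 GiB sector's worth of QA power
	// The deposit is BR over PreCommitDepositProjectionPeriod (20 days of expected
	// reward for this power), clamped to at least 1 attoFIL.
	return PreCommitDepositForPower(rewardEstimate, powerEstimate, qaPower)
}
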
-// The pledge comprises two parts: -// - storage pledge, aka IP base: a multiple of the reward expected to be earned by newly-committed power -// - consensus pledge, aka additional IP: a pro-rata fraction of the circulating money supply -// -// IP = IPBase(t) + AdditionalIP(t) -// IPBase(t) = BR(t, InitialPledgeProjectionPeriod) -// AdditionalIP(t) = LockTarget(t)*PledgeShare(t) -// LockTarget = (LockTargetFactorNum / LockTargetFactorDenom) * FILCirculatingSupply(t) -// PledgeShare(t) = sectorQAPower / max(BaselinePower(t), NetworkQAPower(t)) -func InitialPledgeForPower(qaPower, baselinePower abi.StoragePower, rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, circulatingSupply abi.TokenAmount) abi.TokenAmount { - ipBase := ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate, qaPower, InitialPledgeProjectionPeriod) - - lockTargetNum := big.Mul(InitialPledgeLockTarget.Numerator, circulatingSupply) - lockTargetDenom := InitialPledgeLockTarget.Denominator - pledgeShareNum := qaPower - networkQAPower := smoothing.Estimate(&networkQAPowerEstimate) - pledgeShareDenom := big.Max(big.Max(networkQAPower, baselinePower), qaPower) // use qaPower in case others are 0 - additionalIPNum := big.Mul(lockTargetNum, pledgeShareNum) - additionalIPDenom := big.Mul(lockTargetDenom, pledgeShareDenom) - additionalIP := big.Div(additionalIPNum, additionalIPDenom) - - nominalPledge := big.Add(ipBase, additionalIP) - spaceRacePledgeCap := big.Mul(InitialPledgeMaxPerByte, qaPower) - return big.Min(nominalPledge, spaceRacePledgeCap) -} - -var EstimatedSingleProveCommitGasUsage = big.NewInt(49299973) // PARAM_SPEC -var EstimatedSinglePreCommitGasUsage = big.NewInt(16433324) // PARAM_SPEC -var BatchDiscount = builtin.BigFrac{ // PARAM_SPEC - Numerator: big.NewInt(1), - Denominator: big.NewInt(20), -} -var BatchBalancer = big.Mul(big.NewInt(5), builtin.OneNanoFIL) // PARAM_SPEC - -func AggregateProveCommitNetworkFee(aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { - return aggregateNetworkFee(aggregateSize, EstimatedSingleProveCommitGasUsage, baseFee) -} - -func AggregatePreCommitNetworkFee(aggregateSize int, baseFee abi.TokenAmount) abi.TokenAmount { - return aggregateNetworkFee(aggregateSize, EstimatedSinglePreCommitGasUsage, baseFee) -} - -func aggregateNetworkFee(aggregateSize int, gasUsage big.Int, baseFee abi.TokenAmount) abi.TokenAmount { - effectiveGasFee := big.Max(baseFee, BatchBalancer) - networkFeeNum := big.Product(effectiveGasFee, gasUsage, big.NewInt(int64(aggregateSize)), BatchDiscount.Numerator) - networkFee := big.Div(networkFeeNum, BatchDiscount.Denominator) - return networkFee -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/partition_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/partition_state.go deleted file mode 100644 index df94d3a..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/partition_state.go +++ /dev/null @@ -1,116 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -type Partition struct { - // Sector numbers in this partition, including faulty, unproven, and terminated sectors. - Sectors bitfield.BitField - // Unproven sectors in this partition. 
This bitfield will be cleared on - // a successful window post (or at the end of the partition's next - // deadline). At that time, any still unproven sectors will be added to - // the faulty sector bitfield. - Unproven bitfield.BitField - // Subset of sectors detected/declared faulty and not yet recovered (excl. from PoSt). - // Faults ∩ Terminated = ∅ - Faults bitfield.BitField - // Subset of faulty sectors expected to recover on next PoSt - // Recoveries ∩ Terminated = ∅ - Recoveries bitfield.BitField - // Subset of sectors terminated but not yet removed from partition (excl. from PoSt) - Terminated bitfield.BitField - // Maps epochs sectors that expire in or before that epoch. - // An expiration may be an "on-time" scheduled expiration, or early "faulty" expiration. - // Keys are quantized to last-in-deadline epochs. - ExpirationsEpochs cid.Cid // AMT[ChainEpoch]ExpirationSet - // Subset of terminated that were before their committed expiration epoch, by termination epoch. - // Termination fees have not yet been calculated or paid and associated deals have not yet been - // canceled but effective power has already been adjusted. - // Not quantized. - EarlyTerminated cid.Cid // AMT[ChainEpoch]BitField - - // Power of not-yet-terminated sectors (incl faulty & unproven). - LivePower PowerPair - // Power of yet-to-be-proved sectors (never faulty). - UnprovenPower PowerPair - // Power of currently-faulty sectors. FaultyPower <= LivePower. - FaultyPower PowerPair - // Power of expected-to-recover sectors. RecoveringPower <= FaultyPower. - RecoveringPower PowerPair -} - -// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. -const PartitionExpirationAmtBitwidth = 4 -const PartitionEarlyTerminationArrayAmtBitwidth = 3 - -// Value type for a pair of raw and QA power. -type PowerPair struct { - Raw abi.StoragePower - QA abi.StoragePower -} - -// Live sectors are those that are not terminated (but may be faulty). -func (p *Partition) LiveSectors() (bitfield.BitField, error) { - live, err := bitfield.SubtractBitField(p.Sectors, p.Terminated) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to compute live sectors: %w", err) - } - return live, nil - -} - -// Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power. -func (p *Partition) ActiveSectors() (bitfield.BitField, error) { - live, err := p.LiveSectors() - if err != nil { - return bitfield.BitField{}, err - } - nonFaulty, err := bitfield.SubtractBitField(live, p.Faults) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to compute active sectors: %w", err) - } - active, err := bitfield.SubtractBitField(nonFaulty, p.Unproven) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to compute active sectors: %w", err) - } - return active, err -} - -// Activates unproven sectors, returning the activated power. 
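
// Putting the relationships above together (ActivePowerOf is an illustrative
// helper, not part of this file; PowerPair and its Sub method are defined below):
//
//   Live   = Sectors \ Terminated
//   Active = Live \ Faults \ Unproven
//
// and, since UnprovenPower and FaultyPower are disjoint subsets of LivePower,
// the power behind active sectors is live power less faulty and unproven power:

func ActivePowerOf(p *Partition) PowerPair {
	return p.LivePower.Sub(p.FaultyPower).Sub(p.UnprovenPower)
}
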
-func (p *Partition) ActivateUnproven() PowerPair { - newPower := p.UnprovenPower - p.UnprovenPower = NewPowerPairZero() - p.Unproven = bitfield.New() - return newPower -} - -// -// PowerPair -// - -func NewPowerPairZero() PowerPair { - return NewPowerPair(big.Zero(), big.Zero()) -} - -func NewPowerPair(raw, qa abi.StoragePower) PowerPair { - return PowerPair{Raw: raw, QA: qa} -} - -func (pp PowerPair) Add(other PowerPair) PowerPair { - return PowerPair{ - Raw: big.Add(pp.Raw, other.Raw), - QA: big.Add(pp.QA, other.QA), - } -} - -func (pp PowerPair) Sub(other PowerPair) PowerPair { - return PowerPair{ - Raw: big.Sub(pp.Raw, other.Raw), - QA: big.Sub(pp.QA, other.QA), - } -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/policy.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/policy.go deleted file mode 100644 index bdc922d..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/miner/policy.go +++ /dev/null @@ -1,183 +0,0 @@ -package miner - -import ( - "github.com/filecoin-project/go-state-types/builtin" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" -) - -// The period over which a miner's active sectors are expected to be proven via WindowPoSt. -// This guarantees that (1) user data is proven daily, (2) user data is stored for 24h by a rational miner -// (due to Window PoSt cost assumption). -var WPoStProvingPeriod = abi.ChainEpoch(builtin.EpochsInDay) // 24 hours PARAM_SPEC - -// The period between the opening and the closing of a WindowPoSt deadline in which the miner is expected to -// provide a Window PoSt proof. -// This provides a miner enough time to compute and propagate a Window PoSt proof. -var WPoStChallengeWindow = abi.ChainEpoch(30 * 60 / builtin.EpochDurationSeconds) // 30 minutes (48 per day) PARAM_SPEC - -// WPoStDisputeWindow is the period after a challenge window ends during which -// PoSts submitted during that period may be disputed. -var WPoStDisputeWindow = 2 * ChainFinality // PARAM_SPEC - -// The number of non-overlapping PoSt deadlines in a proving period. -// This spreads a miner's Window PoSt work across a proving period. -const WPoStPeriodDeadlines = uint64(48) // PARAM_SPEC - -// MaxPartitionsPerDeadline is the maximum number of partitions that will be assigned to a deadline. -// For a minimum storage of upto 1Eib, we need 300 partitions per deadline. -// 48 * 32GiB * 2349 * 300 = 1.00808144 EiB -// So, to support upto 10Eib storage, we set this to 3000. -const MaxPartitionsPerDeadline = 3000 - -// The maximum number of partitions that can be loaded in a single invocation. -// This limits the number of simultaneous fault, recovery, or sector-extension declarations. -// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline. -const AddressedPartitionsMax = MaxPartitionsPerDeadline - -// Maximum number of unique "declarations" in batch operations. -const DeclarationsMax = AddressedPartitionsMax - -// The maximum number of sector infos that can be loaded in a single invocation. -// This limits the amount of state to be read in a single message execution. 
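
// A quick arithmetic check of the schedule above, assuming the mainnet values
// builtin.EpochDurationSeconds = 30 and builtin.EpochsInDay = 2880 (those
// constants live outside this file, so they are stated here as assumptions):
//
//   WPoStChallengeWindow                        = 30*60 / 30 = 60 epochs (30 minutes)
//   WPoStPeriodDeadlines * WPoStChallengeWindow = 48 * 60    = 2880 epochs
//                                               = EpochsInDay = WPoStProvingPeriod
//
// so the 48 half-hour challenge windows tile exactly one 24-hour proving period,
// and with 2349 32 GiB sectors per partition the capacity bound quoted above is
// 48 deadlines * 300 partitions * 2349 sectors * 32 GiB ≈ 1.008 EiB.
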
-const AddressedSectorsMax = 25_000 // PARAM_SPEC - -// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible) -// This is a conservative value that is chosen via simulations of all known attacks. -const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC - -// Prefix for sealed sector CIDs (CommR). -var SealedCIDPrefix = cid.Prefix{ - Version: 1, - Codec: cid.FilCommitmentSealed, - MhType: mh.POSEIDON_BLS12_381_A1_FC1, - MhLength: 32, -} - -// List of proof types which may be used when creating a new miner actor. -// This is mutable to allow configuration of testing and development networks. -var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{ - abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {}, - abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {}, -} - -// Maximum delay to allow between sector pre-commit and subsequent proof. -// The allowable delay depends on seal proof algorithm. -var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{ - abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC - abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, - - abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC - abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, - abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, -} - -// The maximum number of sector pre-commitments in a single batch. -// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year. -const PreCommitSectorBatchMaxSize = 256 - -// The maximum number of sector replica updates in a single batch. -// Same as PreCommitSectorBatchMaxSize for consistency -const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize - -// Maximum delay between challenge and pre-commitment. -// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a -// particular chain. -var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC - -// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn. -// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and -// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge. -var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC - -// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed. -// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a -// miner to wait for chain stability during the challenge window. 
-// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions). -const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC - -// Minimum period between fault declaration and the next deadline opening. -// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff, -// the fault declaration is considered invalid for that deadline. -// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges. -const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC - -// The maximum age of a fault before the sector is terminated. -// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral. -var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC - -// Staging period for a miner worker key change. -// This delay prevents a miner choosing a more favorable worker key that wins leader elections. -const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC - -// Minimum number of epochs past the current epoch a sector may be set to expire. -const MinSectorExpiration = 180 * builtin.EpochsInDay // PARAM_SPEC - -// The maximum number of epochs past the current epoch that sector lifetime may be extended. -// A sector may be extended multiple times, however, the total maximum lifetime is also bounded by -// the associated seal proof's maximum lifetime. -const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay // PARAM_SPEC - -// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector. -// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector. -// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier. -// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier. -// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier. -// SectorQuality of a sector is a weighted average of multipliers based on their proportions. 
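
// A worked example of the weighting above, assuming the standard builtin
// multipliers QualityBaseMultiplier = 10, DealWeightMultiplier = 10 and
// VerifiedDealWeightMultiplier = 100 (the x10 / x100 factors referenced in the
// function below; the exact constants live in the builtin package):
//
// Take a 32 GiB sector of duration d whose space is half filled with verified
// deals for the whole duration, with no unverified deals:
//
//   sectorSpaceTime = 32 GiB * d
//   verifiedWeight  = 16 GiB * d,   dealWeight = 0
//   weightedSum     = (16 GiB * d)*10 + 0*10 + (16 GiB * d)*100 = 16 GiB * d * 110
//   SectorQuality   = weightedSum / sectorSpaceTime / 10 = 110 / 20 = 5.5
//                     (stored scaled up by 2^SectorQualityPrecision)
//   QA power        = 32 GiB * 5.5 = 176 GiB
//
// i.e. verified-deal space counts 10x, while empty space and unverified deal
// space both count 1x, and the sector's quality is their space-time-weighted
// average.
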
-func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality { - // sectorSpaceTime = size * duration - sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration))) - // totalDealSpaceTime = dealWeight + verifiedWeight - totalDealSpaceTime := big.Add(dealWeight, verifiedWeight) - - // Base - all size * duration of non-deals - // weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier - weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier) - // Deal - all deal size * deal duration * 10 - // weightedDealSpaceTime = dealWeight * DealWeightMultiplier - weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier) - // Verified - all verified deal size * verified deal duration * 100 - // weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier - weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier) - // Sum - sum of all spacetime - // weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime - weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime) - // scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20 - scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision) - - // Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10) - return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier) -} - -// The power for a sector size, committed duration, and weight. -func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - quality := QualityForWeight(size, duration, dealWeight, verifiedWeight) - return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision) -} - -const MaxAggregatedSectors = 819 -const MinAggregatedSectors = 4 -const MaxAggregateProofSize = 81960 - -// Specification for a linear vesting schedule. -type VestSpec struct { - InitialDelay abi.ChainEpoch // Delay before any amount starts vesting. - VestPeriod abi.ChainEpoch // Period over which the total should vest, after the initial delay. - StepDuration abi.ChainEpoch // Duration between successive incremental vests (independent of vesting period). - Quantization abi.ChainEpoch // Maximum precision of vesting table (limits cardinality of table). -} - -// Returns maximum achievable QA power. -func QAPowerMax(size abi.SectorSize) abi.StoragePower { - return big.Div( - big.Mul(big.NewInt(int64(size)), builtin.VerifiedDealWeightMultiplier), - builtin.QualityBaseMultiplier) -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/cbor_gen.go deleted file mode 100644 index 59d9bd5..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/cbor_gen.go +++ /dev/null @@ -1,1022 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
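
// A note on the length-prefix constants used throughout this generated file
// (these are general CBOR facts rather than anything specific to this package):
// each struct is serialized as a fixed-length CBOR array, and the one-byte
// prefix is 0x80 | field-count (major type 4, "array"). So lengthBufState =
// []byte{143} is 0x8F, a 15-element array matching State's 15 fields, and
// lengthBufClaim = []byte{131} is 0x83, a 3-element array. Similarly, the
// uint64(-x-1) pattern for negative integers follows CBOR major type 1, which
// encodes the value -1-n rather than n itself.
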
- -package power - -import ( - "fmt" - "io" - - address "github.com/filecoin-project/go-address" - abi "github.com/filecoin-project/go-state-types/abi" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufState = []byte{143} - -func (t *State) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufState); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.TotalRawBytePower (big.Int) (struct) - if err := t.TotalRawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalBytesCommitted (big.Int) (struct) - if err := t.TotalBytesCommitted.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalQualityAdjPower (big.Int) (struct) - if err := t.TotalQualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalQABytesCommitted (big.Int) (struct) - if err := t.TotalQABytesCommitted.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalPledgeCollateral (big.Int) (struct) - if err := t.TotalPledgeCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochRawBytePower (big.Int) (struct) - if err := t.ThisEpochRawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochQualityAdjPower (big.Int) (struct) - if err := t.ThisEpochQualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochPledgeCollateral (big.Int) (struct) - if err := t.ThisEpochPledgeCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.ThisEpochQAPowerSmoothed.MarshalCBOR(w); err != nil { - return err - } - - // t.MinerCount (int64) (int64) - if t.MinerCount >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinerCount)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinerCount-1)); err != nil { - return err - } - } - - // t.MinerAboveMinPowerCount (int64) (int64) - if t.MinerAboveMinPowerCount >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinerAboveMinPowerCount)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinerAboveMinPowerCount-1)); err != nil { - return err - } - } - - // t.CronEventQueue (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.CronEventQueue); err != nil { - return xerrors.Errorf("failed to write cid field t.CronEventQueue: %w", err) - } - - // t.FirstCronEpoch (abi.ChainEpoch) (int64) - if t.FirstCronEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.FirstCronEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.FirstCronEpoch-1)); err != nil { - return err - } - } - - // t.Claims (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Claims); err != nil { - return xerrors.Errorf("failed to write cid field t.Claims: %w", err) - } - - // t.ProofValidationBatch (cid.Cid) (struct) - - if t.ProofValidationBatch == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.ProofValidationBatch); err != nil { - return xerrors.Errorf("failed to write cid field t.ProofValidationBatch: %w", err) - } - } - - return nil -} - -func (t *State) UnmarshalCBOR(r 
io.Reader) error { - *t = State{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 15 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.TotalRawBytePower (big.Int) (struct) - - { - - if err := t.TotalRawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalRawBytePower: %w", err) - } - - } - // t.TotalBytesCommitted (big.Int) (struct) - - { - - if err := t.TotalBytesCommitted.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalBytesCommitted: %w", err) - } - - } - // t.TotalQualityAdjPower (big.Int) (struct) - - { - - if err := t.TotalQualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalQualityAdjPower: %w", err) - } - - } - // t.TotalQABytesCommitted (big.Int) (struct) - - { - - if err := t.TotalQABytesCommitted.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalQABytesCommitted: %w", err) - } - - } - // t.TotalPledgeCollateral (big.Int) (struct) - - { - - if err := t.TotalPledgeCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalPledgeCollateral: %w", err) - } - - } - // t.ThisEpochRawBytePower (big.Int) (struct) - - { - - if err := t.ThisEpochRawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochRawBytePower: %w", err) - } - - } - // t.ThisEpochQualityAdjPower (big.Int) (struct) - - { - - if err := t.ThisEpochQualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochQualityAdjPower: %w", err) - } - - } - // t.ThisEpochPledgeCollateral (big.Int) (struct) - - { - - if err := t.ThisEpochPledgeCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochPledgeCollateral: %w", err) - } - - } - // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) - - { - - if err := t.ThisEpochQAPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ThisEpochQAPowerSmoothed: %w", err) - } - - } - // t.MinerCount (int64) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.MinerCount = int64(extraI) - } - // t.MinerAboveMinPowerCount (int64) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.MinerAboveMinPowerCount = int64(extraI) - } - // t.CronEventQueue (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.CronEventQueue: %w", err) - } - - t.CronEventQueue = c - - } 
- // t.FirstCronEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.FirstCronEpoch = abi.ChainEpoch(extraI) - } - // t.Claims (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Claims: %w", err) - } - - t.Claims = c - - } - // t.ProofValidationBatch (cid.Cid) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProofValidationBatch: %w", err) - } - - t.ProofValidationBatch = &c - } - - } - return nil -} - -var lengthBufClaim = []byte{131} - -func (t *Claim) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufClaim); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - if t.WindowPoStProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil { - return err - } - } - - // t.RawBytePower (big.Int) (struct) - if err := t.RawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjPower (big.Int) (struct) - if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Claim) UnmarshalCBOR(r io.Reader) error { - *t = Claim{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.WindowPoStProofType = abi.RegisteredPoStProof(extraI) - } - // t.RawBytePower (big.Int) (struct) - - { - - if err := t.RawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) - } - - } - // t.QualityAdjPower (big.Int) (struct) - - { - - if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) - } - - } - return nil -} - -var lengthBufUpdateClaimedPowerParams = []byte{130} - -func (t *UpdateClaimedPowerParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := 
w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufUpdateClaimedPowerParams); err != nil { - return err - } - - // t.RawByteDelta (big.Int) (struct) - if err := t.RawByteDelta.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjustedDelta (big.Int) (struct) - if err := t.QualityAdjustedDelta.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *UpdateClaimedPowerParams) UnmarshalCBOR(r io.Reader) error { - *t = UpdateClaimedPowerParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.RawByteDelta (big.Int) (struct) - - { - - if err := t.RawByteDelta.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RawByteDelta: %w", err) - } - - } - // t.QualityAdjustedDelta (big.Int) (struct) - - { - - if err := t.QualityAdjustedDelta.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjustedDelta: %w", err) - } - - } - return nil -} - -var lengthBufMinerConstructorParams = []byte{134} - -func (t *MinerConstructorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufMinerConstructorParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.OwnerAddr (address.Address) (struct) - if err := t.OwnerAddr.MarshalCBOR(w); err != nil { - return err - } - - // t.WorkerAddr (address.Address) (struct) - if err := t.WorkerAddr.MarshalCBOR(w); err != nil { - return err - } - - // t.ControlAddrs ([]address.Address) (slice) - if len(t.ControlAddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ControlAddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddrs))); err != nil { - return err - } - for _, v := range t.ControlAddrs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - if t.WindowPoStProofType >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStProofType)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WindowPoStProofType-1)); err != nil { - return err - } - } - - // t.PeerId ([]uint8) (slice) - if len(t.PeerId) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.PeerId was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.PeerId))); err != nil { - return err - } - - if _, err := w.Write(t.PeerId[:]); err != nil { - return err - } - - // t.Multiaddrs ([][]uint8) (slice) - if len(t.Multiaddrs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Multiaddrs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { - return err - } - for _, v := range t.Multiaddrs { - if len(v) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { - return err - } - - if _, err := w.Write(v[:]); err != nil { - return err - } - } - return nil -} - -func (t *MinerConstructorParams) 
UnmarshalCBOR(r io.Reader) error { - *t = MinerConstructorParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 6 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.OwnerAddr (address.Address) (struct) - - { - - if err := t.OwnerAddr.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.OwnerAddr: %w", err) - } - - } - // t.WorkerAddr (address.Address) (struct) - - { - - if err := t.WorkerAddr.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.WorkerAddr: %w", err) - } - - } - // t.ControlAddrs ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ControlAddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ControlAddrs = make([]address.Address, extra) - } - - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ControlAddrs[i] = v - } - - // t.WindowPoStProofType (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.WindowPoStProofType = abi.RegisteredPoStProof(extraI) - } - // t.PeerId ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.PeerId: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.PeerId = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.PeerId[:]); err != nil { - return err - } - // t.Multiaddrs ([][]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Multiaddrs = make([][]uint8, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Multiaddrs[i] = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { - return err - } - } - } - - return nil -} - -var lengthBufCreateMinerReturn = []byte{130} - -func (t *CreateMinerReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCreateMinerReturn); err != nil { - return err - } - - // t.IDAddress (address.Address) 
(struct) - if err := t.IDAddress.MarshalCBOR(w); err != nil { - return err - } - - // t.RobustAddress (address.Address) (struct) - if err := t.RobustAddress.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CreateMinerReturn) UnmarshalCBOR(r io.Reader) error { - *t = CreateMinerReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.IDAddress (address.Address) (struct) - - { - - if err := t.IDAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.IDAddress: %w", err) - } - - } - // t.RobustAddress (address.Address) (struct) - - { - - if err := t.RobustAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RobustAddress: %w", err) - } - - } - return nil -} - -var lengthBufCurrentTotalPowerReturn = []byte{132} - -func (t *CurrentTotalPowerReturn) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufCurrentTotalPowerReturn); err != nil { - return err - } - - // t.RawBytePower (big.Int) (struct) - if err := t.RawBytePower.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjPower (big.Int) (struct) - if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { - return err - } - - // t.PledgeCollateral (big.Int) (struct) - if err := t.PledgeCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) - if err := t.QualityAdjPowerSmoothed.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CurrentTotalPowerReturn) UnmarshalCBOR(r io.Reader) error { - *t = CurrentTotalPowerReturn{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.RawBytePower (big.Int) (struct) - - { - - if err := t.RawBytePower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) - } - - } - // t.QualityAdjPower (big.Int) (struct) - - { - - if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) - } - - } - // t.PledgeCollateral (big.Int) (struct) - - { - - if err := t.PledgeCollateral.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PledgeCollateral: %w", err) - } - - } - // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) - - { - - if err := t.QualityAdjPowerSmoothed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QualityAdjPowerSmoothed: %w", err) - } - - } - return nil -} - -var lengthBufEnrollCronEventParams = []byte{130} - -func (t *EnrollCronEventParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufEnrollCronEventParams); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.EventEpoch (abi.ChainEpoch) (int64) - if t.EventEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EventEpoch)); err != nil { - return 
err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EventEpoch-1)); err != nil { - return err - } - } - - // t.Payload ([]uint8) (slice) - if len(t.Payload) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Payload was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Payload))); err != nil { - return err - } - - if _, err := w.Write(t.Payload[:]); err != nil { - return err - } - return nil -} - -func (t *EnrollCronEventParams) UnmarshalCBOR(r io.Reader) error { - *t = EnrollCronEventParams{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.EventEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EventEpoch = abi.ChainEpoch(extraI) - } - // t.Payload ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Payload: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Payload = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Payload[:]); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/methods.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/methods.go deleted file mode 100644 index d5474a2..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/methods.go +++ /dev/null @@ -1,18 +0,0 @@ -package power - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/proof" -) - -var Methods = []interface{}{ - 1: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // Constructor - 2: *new(func(interface{}, *CreateMinerParams) *CreateMinerReturn), // CreateMiner - 3: *new(func(interface{}, *UpdateClaimedPowerParams) *abi.EmptyValue), // UpdateClaimedPower - 4: *new(func(interface{}, *EnrollCronEventParams) *abi.EmptyValue), // EnrollCronEvent - 5: *new(func(interface{}, *abi.EmptyValue) *abi.EmptyValue), // CronTick - 6: *new(func(interface{}, *abi.TokenAmount) *abi.EmptyValue), // UpdatePledgeTotal - 7: nil, - 8: *new(func(interface{}, *proof.SealVerifyInfo) *abi.EmptyValue), // SubmitPoRepForBulkVerify - 9: *new(func(interface{}, *abi.EmptyValue) *CurrentTotalPowerReturn), // CurrentTotalPower -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_state.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_state.go deleted file mode 100644 index 6db09c5..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_state.go +++ /dev/null @@ -1,165 +0,0 @@ -package power - -import ( - addr 
"github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" - "github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" -) - -// genesis power in bytes = 750,000 GiB -var InitialQAPowerEstimatePosition = big.Mul(big.NewInt(750_000), big.NewInt(1<<30)) - -// max chain throughput in bytes per epoch = 120 ProveCommits / epoch = 3,840 GiB -var InitialQAPowerEstimateVelocity = big.Mul(big.NewInt(3_840), big.NewInt(1<<30)) - -// Bitwidth of CronEventQueue HAMT determined empirically from mutation -// patterns and projections of mainnet data. -const CronQueueHamtBitwidth = 6 - -// Bitwidth of CronEventQueue AMT determined empirically from mutation -// patterns and projections of mainnet data. -const CronQueueAmtBitwidth = 6 - -// Bitwidth of ProofValidationBatch AMT determined empirically from mutation -// pattersn and projections of mainnet data. -const ProofValidationBatchAmtBitwidth = 4 - -// The number of miners that must meet the consensus minimum miner power before that minimum power is enforced -// as a condition of leader election. -// This ensures a network still functions before any miners reach that threshold. -const ConsensusMinerMinMiners = 4 // PARAM_SPEC - -type State struct { - TotalRawBytePower abi.StoragePower - // TotalBytesCommitted includes claims from miners below min power threshold - TotalBytesCommitted abi.StoragePower - TotalQualityAdjPower abi.StoragePower - // TotalQABytesCommitted includes claims from miners below min power threshold - TotalQABytesCommitted abi.StoragePower - TotalPledgeCollateral abi.TokenAmount - - // These fields are set once per epoch in the previous cron tick and used - // for consistent values across a single epoch's state transition. - ThisEpochRawBytePower abi.StoragePower - ThisEpochQualityAdjPower abi.StoragePower - ThisEpochPledgeCollateral abi.TokenAmount - ThisEpochQAPowerSmoothed smoothing.FilterEstimate - - MinerCount int64 - // Number of miners having proven the minimum consensus power. - MinerAboveMinPowerCount int64 - - // A queue of events to be triggered by cron, indexed by epoch. - CronEventQueue cid.Cid // Multimap, (HAMT[ChainEpoch]AMT[CronEvent]) - - // First epoch in which a cron task may be stored. - // Cron will iterate every epoch between this and the current epoch inclusively to find tasks to execute. - FirstCronEpoch abi.ChainEpoch - - // Claimed power for each miner. 
- Claims cid.Cid // Map, HAMT[address]Claim - - ProofValidationBatch *cid.Cid // Multimap, (HAMT[Address]AMT[SealVerifyInfo]) -} - -func ConstructState(store adt.Store) (*State, error) { - emptyClaimsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty map: %w", err) - } - emptyCronQueueMMapCid, err := adt.StoreEmptyMultimap(store, CronQueueHamtBitwidth, CronQueueAmtBitwidth) - if err != nil { - return nil, xerrors.Errorf("failed to create empty multimap: %w", err) - } - - return &State{ - TotalRawBytePower: abi.NewStoragePower(0), - TotalBytesCommitted: abi.NewStoragePower(0), - TotalQualityAdjPower: abi.NewStoragePower(0), - TotalQABytesCommitted: abi.NewStoragePower(0), - TotalPledgeCollateral: abi.NewTokenAmount(0), - ThisEpochRawBytePower: abi.NewStoragePower(0), - ThisEpochQualityAdjPower: abi.NewStoragePower(0), - ThisEpochPledgeCollateral: abi.NewTokenAmount(0), - ThisEpochQAPowerSmoothed: smoothing.NewEstimate(InitialQAPowerEstimatePosition, InitialQAPowerEstimateVelocity), - FirstCronEpoch: 0, - CronEventQueue: emptyCronQueueMMapCid, - Claims: emptyClaimsMapCid, - MinerCount: 0, - MinerAboveMinPowerCount: 0, - }, nil -} - -type Claim struct { - // Miner's proof type used to determine minimum miner size - WindowPoStProofType abi.RegisteredPoStProof - - // Sum of raw byte power for a miner's sectors. - RawBytePower abi.StoragePower - - // Sum of quality adjusted power for a miner's sectors. - QualityAdjPower abi.StoragePower -} - -// MinerNominalPowerMeetsConsensusMinimum is used to validate Election PoSt -// winners outside the chain state. If the miner has over a threshold of power -// the miner meets the minimum. If the network is a below a threshold of -// miners and has power > zero the miner meets the minimum. 
-func (st *State) MinerNominalPowerMeetsConsensusMinimum(s adt.Store, miner addr.Address) (bool, error) { //nolint:deadcode,unused - claims, err := adt.AsMap(s, st.Claims, builtin.DefaultHamtBitwidth) - if err != nil { - return false, xerrors.Errorf("failed to load claims: %w", err) - } - - claim, ok, err := getClaim(claims, miner) - if err != nil { - return false, err - } - if !ok { - return false, xerrors.Errorf("no claim for actor %w", miner) - } - - minerNominalPower := claim.RawBytePower - minerMinPower, err := builtin.ConsensusMinerMinPower(claim.WindowPoStProofType) - if err != nil { - return false, xerrors.Errorf("could not get miner min power from proof type: %w", err) - } - - // if miner is larger than min power requirement, we're set - if minerNominalPower.GreaterThanEqual(minerMinPower) { - return true, nil - } - - // otherwise, if ConsensusMinerMinMiners miners meet min power requirement, return false - if st.MinerAboveMinPowerCount >= ConsensusMinerMinMiners { - return false, nil - } - - // If fewer than ConsensusMinerMinMiners over threshold miner can win a block with non-zero power - return minerNominalPower.GreaterThan(abi.NewStoragePower(0)), nil -} - -func (st *State) GetClaim(s adt.Store, a addr.Address) (*Claim, bool, error) { - claims, err := adt.AsMap(s, st.Claims, builtin.DefaultHamtBitwidth) - if err != nil { - return nil, false, xerrors.Errorf("failed to load claims: %w", err) - } - return getClaim(claims, a) -} - -func getClaim(claims *adt.Map, a addr.Address) (*Claim, bool, error) { - var out Claim - found, err := claims.Get(abi.AddrKey(a), &out) - if err != nil { - return nil, false, xerrors.Errorf("failed to get claim for address %v: %w", a, err) - } - if !found { - return nil, false, nil - } - return &out, true, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_types.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_types.go deleted file mode 100644 index f491b24..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/power/power_types.go +++ /dev/null @@ -1,48 +0,0 @@ -package power - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing" -) - -// Storage miner actor constructor params are defined here so the power actor can send them to the init actor -// to instantiate miners. -type MinerConstructorParams struct { - OwnerAddr addr.Address - WorkerAddr addr.Address - ControlAddrs []addr.Address - WindowPoStProofType abi.RegisteredPoStProof - PeerId abi.PeerID - Multiaddrs []abi.Multiaddrs -} - -type CreateMinerParams struct { - Owner addr.Address - Worker addr.Address - WindowPoStProofType abi.RegisteredPoStProof - Peer abi.PeerID - Multiaddrs []abi.Multiaddrs -} - -type CreateMinerReturn struct { - IDAddress addr.Address // The canonical ID-based address for the actor. - RobustAddress addr.Address // A more expensive but re-org-safe address for the newly created actor. 
-} - -type UpdateClaimedPowerParams struct { - RawByteDelta abi.StoragePower - QualityAdjustedDelta abi.StoragePower -} - -type EnrollCronEventParams struct { - EventEpoch abi.ChainEpoch - Payload []byte -} - -type CurrentTotalPowerReturn struct { - RawBytePower abi.StoragePower - QualityAdjPower abi.StoragePower - PledgeCollateral abi.TokenAmount - QualityAdjPowerSmoothed smoothing.FilterEstimate -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/array.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/array.go deleted file mode 100644 index edcdf68..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/array.go +++ /dev/null @@ -1,152 +0,0 @@ -package adt - -import ( - "bytes" - - amt "github.com/filecoin-project/go-amt-ipld/v4" - - "github.com/filecoin-project/go-state-types/cbor" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" -) - -var DefaultAmtOptions = []amt.Option{} - -// Array stores a sparse sequence of values in an AMT. -type Array struct { - root *amt.Root - store Store -} - -// AsArray interprets a store as an AMT-based array with root `r`. -func AsArray(s Store, r cid.Cid, bitwidth int) (*Array, error) { - options := append(DefaultAmtOptions, amt.UseTreeBitWidth(uint(bitwidth))) - root, err := amt.LoadAMT(s.Context(), s, r, options...) - if err != nil { - return nil, xerrors.Errorf("failed to root: %w", err) - } - - return &Array{ - root: root, - store: s, - }, nil -} - -// Creates a new array backed by an empty AMT. -func MakeEmptyArray(s Store, bitwidth int) (*Array, error) { - options := append(DefaultAmtOptions, amt.UseTreeBitWidth(uint(bitwidth))) - root, err := amt.NewAMT(s, options...) - if err != nil { - return nil, err - } - return &Array{ - root: root, - store: s, - }, nil -} - -// Writes a new empty array to the store, returning its CID. -func StoreEmptyArray(s Store, bitwidth int) (cid.Cid, error) { - arr, err := MakeEmptyArray(s, bitwidth) - if err != nil { - return cid.Undef, err - } - return arr.Root() -} - -// Returns the root CID of the underlying AMT. -func (a *Array) Root() (cid.Cid, error) { - return a.root.Flush(a.store.Context()) -} - -// Appends a value to the end of the array. Assumes continuous array. -// If the array isn't continuous use Set and a separate counter -func (a *Array) AppendContinuous(value cbor.Marshaler) error { - if err := a.root.Set(a.store.Context(), a.root.Len(), value); err != nil { - return xerrors.Errorf("append failed to set index %v value %v in root %v: %w", a.root.Len(), value, a.root, err) - } - return nil -} - -func (a *Array) Set(i uint64, value cbor.Marshaler) error { - if err := a.root.Set(a.store.Context(), i, value); err != nil { - return xerrors.Errorf("failed to set index %v value %v in root %v: %w", i, value, a.root, err) - } - return nil -} - -// Removes the value at index `i` from the AMT, if it exists. -// Returns whether the index was previously present. -func (a *Array) TryDelete(i uint64) (bool, error) { - if found, err := a.root.Delete(a.store.Context(), i); err != nil { - return false, xerrors.Errorf("array delete failed to delete index %v in root %v: %w", i, a.root, err) - } else { - return found, nil - } -} - -// Removes the value at index `i` from the AMT, expecting it to exist. 
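
// A small usage sketch of the array helpers above. The raw bytes are chosen for
// illustration, cbg.Deferred is the pass-through value type used elsewhere in
// this file, and the bitwidth of 3 is arbitrary (exampleArrayRoundTrip itself is
// illustrative, not part of this package):

func exampleArrayRoundTrip(s Store) (cid.Cid, error) {
	arr, err := MakeEmptyArray(s, 3)
	if err != nil {
		return cid.Undef, err
	}
	// 0x18 0x2a is the CBOR encoding of the integer 42; Deferred writes it verbatim.
	v := cbg.Deferred{Raw: []byte{0x18, 0x2a}}
	if err := arr.AppendContinuous(&v); err != nil {
		return cid.Undef, err
	}
	// Flush the AMT and return the new root CID for the caller to persist.
	return arr.Root()
}
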
-func (a *Array) Delete(i uint64) error { - if found, err := a.root.Delete(a.store.Context(), i); err != nil { - return xerrors.Errorf("failed to delete index %v in root %v: %w", i, a.root, err) - } else if !found { - return xerrors.Errorf("no such index %v in root %v to delete: %w", i, a.root, err) - } - return nil -} - -func (a *Array) BatchDelete(ix []uint64, strict bool) error { - if _, err := a.root.BatchDelete(a.store.Context(), ix, strict); err != nil { - return xerrors.Errorf("failed to batch delete keys %v: %w", ix, err) - } - return nil -} - -// Iterates all entries in the array, deserializing each value in turn into `out` and then calling a function. -// Iteration halts if the function returns an error. -// If the output parameter is nil, deserialization is skipped. -func (a *Array) ForEach(out cbor.Unmarshaler, fn func(i int64) error) error { - return a.root.ForEach(a.store.Context(), func(k uint64, val *cbg.Deferred) error { - if out != nil { - if deferred, ok := out.(*cbg.Deferred); ok { - // fast-path deferred -> deferred to avoid re-decoding. - *deferred = *val - } else if err := out.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { - return err - } - } - return fn(int64(k)) - }) -} - -func (a *Array) Length() uint64 { - return a.root.Len() -} - -// Get retrieves array element into the 'out' unmarshaler, returning a boolean -// indicating whether the element was found in the array -func (a *Array) Get(k uint64, out cbor.Unmarshaler) (bool, error) { - if found, err := a.root.Get(a.store.Context(), k, out); err != nil { - return false, xerrors.Errorf("failed to get index %v in root %v: %w", k, a.root, err) - } else { - return found, nil - } -} - -// Retrieves an array value into the 'out' unmarshaler (if non-nil), and removes the entry. -// Returns a boolean indicating whether the element was previously in the array. -func (a *Array) Pop(k uint64, out cbor.Unmarshaler) (bool, error) { - if found, err := a.root.Get(a.store.Context(), k, out); err != nil { - return false, xerrors.Errorf("failed to get index %v in root %v: %w", k, a.root, err) - } else if !found { - return false, nil - } - - if found, err := a.root.Delete(a.store.Context(), k); err != nil { - return false, xerrors.Errorf("failed to delete index %v in root %v: %w", k, a.root, err) - } else if !found { - return false, xerrors.Errorf("can't find index %v to delete in root %v", k, a.root) - } - return true, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/balancetable.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/balancetable.go deleted file mode 100644 index f31e9c6..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/balancetable.go +++ /dev/null @@ -1,40 +0,0 @@ -package adt - -import ( - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - cid "github.com/ipfs/go-cid" -) - -// Bitwidth of balance table HAMTs, determined empirically from mutation -// patterns and projections of mainnet data -const BalanceTableBitwidth = 6 - -// A specialization of a map of addresses to (positive) token amounts. -// Absent keys implicitly have a balance of zero. -type BalanceTable Map - -// Interprets a store as balance table with root `r`. 
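For orientation, a minimal usage sketch of the AMT-backed Array API above (illustrative only, not part of the vendored files). It assumes go-ipld-cbor's NewMemCborStore and cbor-gen's CborInt wrapper as a throwaway store and value type; both are assumptions rather than anything this diff relies on.

package main

import (
	"context"
	"fmt"

	adt "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
	ipldcbor "github.com/ipfs/go-ipld-cbor"
	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	ctx := context.Background()
	// Wrap a plain IpldStore so it satisfies adt.Store (adds a Context).
	store := adt.WrapStore(ctx, ipldcbor.NewMemCborStore())

	// Bitwidth 3 is an arbitrary choice for this sketch.
	arr, err := adt.MakeEmptyArray(store, 3)
	if err != nil {
		panic(err)
	}

	// Append a value at the next index (index 0 for an empty array).
	v := cbg.CborInt(42) // assumed cbor-gen helper implementing the cbor interfaces
	if err := arr.AppendContinuous(&v); err != nil {
		panic(err)
	}

	var out cbg.CborInt
	found, err := arr.Get(0, &out)
	fmt.Println(found, out, err) // true 42 <nil>

	// Flush the AMT and obtain its root CID for persisting elsewhere.
	root, err := arr.Root()
	fmt.Println(root, err)
}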
-func AsBalanceTable(s Store, r cid.Cid) (*BalanceTable, error) { - m, err := AsMap(s, r, BalanceTableBitwidth) - if err != nil { - return nil, err - } - - return &BalanceTable{ - root: m.root, - store: s, - }, nil -} - -// Gets the balance for a key, which is zero if they key has never been added to. -func (t *BalanceTable) Get(key addr.Address) (abi.TokenAmount, error) { - var value abi.TokenAmount - found, err := (*Map)(t).Get(abi.AddrKey(key), &value) - if !found || err != nil { - value = big.Zero() - } - - return value, err -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/map.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/map.go deleted file mode 100644 index de8bcb7..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/map.go +++ /dev/null @@ -1,182 +0,0 @@ -package adt - -import ( - "bytes" - "crypto/sha256" - - hamt "github.com/filecoin-project/go-hamt-ipld/v3" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/cbor" - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" -) - -// DefaultHamtOptions specifies default options used to construct Filecoin HAMTs. -// Specific HAMT instances may specify additional options, especially the bitwidth. -var DefaultHamtOptions = []hamt.Option{ - hamt.UseHashFunction(func(input []byte) []byte { - res := sha256.Sum256(input) - return res[:] - }), -} - -// Map stores key-value pairs in a HAMT. -type Map struct { - lastCid cid.Cid - root *hamt.Node - store Store -} - -// AsMap interprets a store as a HAMT-based map with root `r`. -// The HAMT is interpreted with branching factor 2^bitwidth. -// We could drop this parameter if https://github.com/filecoin-project/go-hamt-ipld/issues/79 is implemented. -func AsMap(s Store, root cid.Cid, bitwidth int) (*Map, error) { - options := append(DefaultHamtOptions, hamt.UseTreeBitWidth(bitwidth)) - nd, err := hamt.LoadNode(s.Context(), s, root, options...) - if err != nil { - return nil, xerrors.Errorf("failed to load hamt node: %w", err) - } - - return &Map{ - lastCid: root, - root: nd, - store: s, - }, nil -} - -// Creates a new map backed by an empty HAMT. -func MakeEmptyMap(s Store, bitwidth int) (*Map, error) { - options := append(DefaultHamtOptions, hamt.UseTreeBitWidth(bitwidth)) - nd, err := hamt.NewNode(s, options...) - if err != nil { - return nil, err - } - return &Map{ - lastCid: cid.Undef, - root: nd, - store: s, - }, nil -} - -// Creates and stores a new empty map, returning its CID. -func StoreEmptyMap(s Store, bitwidth int) (cid.Cid, error) { - m, err := MakeEmptyMap(s, bitwidth) - if err != nil { - return cid.Undef, err - } - return m.Root() -} - -// Returns the root cid of underlying HAMT. -func (m *Map) Root() (cid.Cid, error) { - if err := m.root.Flush(m.store.Context()); err != nil { - return cid.Undef, xerrors.Errorf("failed to flush map root: %w", err) - } - - c, err := m.store.Put(m.store.Context(), m.root) - if err != nil { - return cid.Undef, xerrors.Errorf("writing map root object: %w", err) - } - m.lastCid = c - - return c, nil -} - -// Put adds value `v` with key `k` to the hamt store. 
-func (m *Map) Put(k abi.Keyer, v cbor.Marshaler) error { - if err := m.root.Set(m.store.Context(), k.Key(), v); err != nil { - return xerrors.Errorf("failed to set key %v value %v in node %v: %w", k.Key(), v, m.lastCid, err) - } - return nil -} - -// Get retrieves the value at `k` into `out`, if the `k` is present and `out` is non-nil. -// Returns whether the key was found. -func (m *Map) Get(k abi.Keyer, out cbor.Unmarshaler) (bool, error) { - if found, err := m.root.Find(m.store.Context(), k.Key(), out); err != nil { - return false, xerrors.Errorf("failed to get key %v in node %v: %w", m.lastCid, k.Key(), err) - } else { - return found, nil - } -} - -// Has checks for the existence of a key without deserializing its value. -func (m *Map) Has(k abi.Keyer) (bool, error) { - if found, err := m.root.Find(m.store.Context(), k.Key(), nil); err != nil { - return false, xerrors.Errorf("failed to check key %v in node %v: %w", m.lastCid, k.Key(), err) - } else { - return found, nil - } -} - -// Sets key key `k` to value `v` iff the key is not already present. -func (m *Map) PutIfAbsent(k abi.Keyer, v cbor.Marshaler) (bool, error) { - if modified, err := m.root.SetIfAbsent(m.store.Context(), k.Key(), v); err != nil { - return false, xerrors.Errorf("failed to set key %v value %v in node %v: %w", k.Key(), v, m.lastCid, err) - } else { - return modified, nil - } -} - -// Removes the value at `k` from the hamt store, if it exists. -// Returns whether the key was previously present. -func (m *Map) TryDelete(k abi.Keyer) (bool, error) { - if found, err := m.root.Delete(m.store.Context(), k.Key()); err != nil { - return false, xerrors.Errorf("failed to delete key %v in node %v: %v", k.Key(), m.root, err) - } else { - return found, nil - } -} - -// Removes the value at `k` from the hamt store, expecting it to exist. -func (m *Map) Delete(k abi.Keyer) error { - if found, err := m.root.Delete(m.store.Context(), k.Key()); err != nil { - return xerrors.Errorf("failed to delete key %v in node %v: %v", k.Key(), m.root, err) - } else if !found { - return xerrors.Errorf("no such key %v to delete in node %v", k.Key(), m.root) - } - return nil -} - -// Iterates all entries in the map, deserializing each value in turn into `out` and then -// calling a function with the corresponding key. -// Iteration halts if the function returns an error. -// If the output parameter is nil, deserialization is skipped. -func (m *Map) ForEach(out cbor.Unmarshaler, fn func(key string) error) error { - return m.root.ForEach(m.store.Context(), func(k string, val *cbg.Deferred) error { - if out != nil { - // Why doesn't hamt.ForEach() just return the value as bytes? - err := out.UnmarshalCBOR(bytes.NewReader(val.Raw)) - if err != nil { - return err - } - } - return fn(k) - }) -} - -// Collects all the keys from the map into a slice of strings. -func (m *Map) CollectKeys() (out []string, err error) { - err = m.ForEach(nil, func(key string) error { - out = append(out, key) - return nil - }) - return -} - -// Retrieves the value for `k` into the 'out' unmarshaler (if non-nil), and removes the entry. -// Returns a boolean indicating whether the element was previously in the map. 
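A similar sketch for the HAMT-backed Map above, keyed by an ID address via abi.AddrKey. It again assumes NewMemCborStore and cbg.CborInt, and hard-codes bitwidth 5 as a stand-in for builtin.DefaultHamtBitwidth.

package main

import (
	"context"
	"fmt"

	addr "github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	adt "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
	ipldcbor "github.com/ipfs/go-ipld-cbor"
	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	ctx := context.Background()
	store := adt.WrapStore(ctx, ipldcbor.NewMemCborStore())

	// 5 is assumed here as a typical HAMT bitwidth.
	m, err := adt.MakeEmptyMap(store, 5)
	if err != nil {
		panic(err)
	}

	a, _ := addr.NewIDAddress(101)
	val := cbg.CborInt(7)
	if err := m.Put(abi.AddrKey(a), &val); err != nil {
		panic(err)
	}

	var out cbg.CborInt
	found, err := m.Get(abi.AddrKey(a), &out)
	fmt.Println(found, out, err) // true 7 <nil>

	// Flush the HAMT and obtain its root CID.
	root, err := m.Root()
	fmt.Println(root, err)
}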
-func (m *Map) Pop(k abi.Keyer, out cbor.Unmarshaler) (bool, error) { - key := k.Key() - if found, err := m.root.Find(m.store.Context(), key, out); err != nil || !found { - return found, err - } - - if found, err := m.root.Delete(m.store.Context(), key); err != nil { - return false, err - } else if !found { - return false, xerrors.Errorf("failed to find key %v to delete", k.Key()) - } - return true, nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/multimap.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/multimap.go deleted file mode 100644 index 72d7452..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/multimap.go +++ /dev/null @@ -1,34 +0,0 @@ -package adt - -import "github.com/ipfs/go-cid" - -// Multimap stores multiple values per key in a HAMT of AMTs. -// The order of insertion of values for each key is retained. -type Multimap struct { - mp *Map - innerBitwidth int -} - -// Creates a new map backed by an empty HAMT and flushes it to the store. -// The outer map has a branching factor of 2^bitwidth. -func MakeEmptyMultimap(s Store, outerBitwidth, innerBitwidth int) (*Multimap, error) { - m, err := MakeEmptyMap(s, outerBitwidth) - if err != nil { - return nil, err - } - return &Multimap{m, innerBitwidth}, nil -} - -// Creates and stores a new empty multimap, returning its CID. -func StoreEmptyMultimap(store Store, outerBitwidth, innerBitwidth int) (cid.Cid, error) { - mmap, err := MakeEmptyMultimap(store, outerBitwidth, innerBitwidth) - if err != nil { - return cid.Undef, err - } - return mmap.Root() -} - -// Returns the root cid of the underlying HAMT. -func (mm *Multimap) Root() (cid.Cid, error) { - return mm.mp.Root() -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/store.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/store.go deleted file mode 100644 index 004b409..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/adt/store.go +++ /dev/null @@ -1,32 +0,0 @@ -package adt - -import ( - "context" - - ipldcbor "github.com/ipfs/go-ipld-cbor" -) - -// Store defines an interface required to back the ADTs in this package. -type Store interface { - Context() context.Context - ipldcbor.IpldStore -} - -// Adapts a vanilla IPLD store as an ADT store. 
-func WrapStore(ctx context.Context, store ipldcbor.IpldStore) Store { - return &wstore{ - ctx: ctx, - IpldStore: store, - } -} - -type wstore struct { - ctx context.Context - ipldcbor.IpldStore -} - -var _ Store = &wstore{} - -func (s *wstore) Context() context.Context { - return s.ctx -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/expneg.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/expneg.go deleted file mode 100644 index 6432230..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/expneg.go +++ /dev/null @@ -1,61 +0,0 @@ -package math - -import ( - "math/big" -) - -var ( - // Coefficents in Q.128 format - expNumCoef []*big.Int - expDenoCoef []*big.Int -) - -func init() { - - // parameters are in integer format, - // coefficients are *2^-128 of that - // so we can just load them if we treat them as Q.128 - num := []string{ - "-648770010757830093818553637600", - "67469480939593786226847644286976", - "-3197587544499098424029388939001856", - "89244641121992890118377641805348864", - "-1579656163641440567800982336819953664", - "17685496037279256458459817590917169152", - "-115682590513835356866803355398940131328", - "340282366920938463463374607431768211456", - } - expNumCoef = Parse(num) - - deno := []string{ - "1225524182432722209606361", - "114095592300906098243859450", - "5665570424063336070530214243", - "194450132448609991765137938448", - "5068267641632683791026134915072", - "104716890604972796896895427629056", - "1748338658439454459487681798864896", - "23704654329841312470660182937960448", - "259380097567996910282699886670381056", - "2250336698853390384720606936038375424", - "14978272436876548034486263159246028800", - "72144088983913131323343765784380833792", - "224599776407103106596571252037123047424", - "340282366920938463463374607431768211456", - } - expDenoCoef = Parse(deno) -} - -// ExpNeg accepts x in Q.128 format and computes e^-x. -// It is most precise within [0, 1.725) range, where error is less than 3.4e-30. -// Over the [0, 5) range its error is less than 4.6e-15. -// Output is in Q.128 format. 
-func ExpNeg(x *big.Int) *big.Int { - // exp is approximated by rational function - // polynomials of the rational function are evaluated using Horner's method - num := Polyval(expNumCoef, x) // Q.128 - deno := Polyval(expDenoCoef, x) // Q.128 - - num = num.Lsh(num, Precision128) // Q.256 - return num.Div(num, deno) // Q.256 / Q.128 => Q.128 -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/ln.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/ln.go deleted file mode 100644 index 4d21b23..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/ln.go +++ /dev/null @@ -1,78 +0,0 @@ -package math - -import ( - gbig "math/big" - - "github.com/filecoin-project/go-state-types/big" -) - -var ( - // Coefficients in Q.128 format - lnNumCoef []*gbig.Int - lnDenomCoef []*gbig.Int - ln2 big.Int -) - -func init() { - // ln approximation coefficients - // parameters are in integer format, - // coefficients are *2^-128 of that - // so we can just load them if we treat them as Q.128 - num := []string{ - "261417938209272870992496419296200268025", - "7266615505142943436908456158054846846897", - "32458783941900493142649393804518050491988", - "17078670566130897220338060387082146864806", - "-35150353308172866634071793531642638290419", - "-20351202052858059355702509232125230498980", - "-1563932590352680681114104005183375350999", - } - lnNumCoef = Parse(num) - - denom := []string{ - "49928077726659937662124949977867279384", - "2508163877009111928787629628566491583994", - "21757751789594546643737445330202599887121", - "53400635271583923415775576342898617051826", - "41248834748603606604000911015235164348839", - "9015227820322455780436733526367238305537", - "340282366920938463463374607431768211456", - } - lnDenomCoef = Parse(denom) - - constStrs := []string{ - "235865763225513294137944142764154484399", // ln(2) - } - constBigs := Parse(constStrs) - ln2 = big.NewFromGo(constBigs[0]) -} - -// The natural log of Q.128 x. -func Ln(z big.Int) big.Int { - // bitlen - 1 - precision - k := int64(z.BitLen()) - 1 - Precision128 // Q.0 - x := big.Zero() // nolint:ineffassign - - if k > 0 { - x = big.Rsh(z, uint(k)) // Q.128 - } else { - x = big.Lsh(z, uint(-k)) // Q.128 - } - - // ln(z) = ln(x * 2^k) = ln(x) + k * ln2 - lnz := big.Mul(big.NewInt(k), ln2) // Q.0 * Q.128 => Q.128 - return big.Sum(lnz, lnBetweenOneAndTwo(x)) // Q.128 -} - -// The natural log of x, specified in Q.128 format -// Should only use with 1 <= x <= 2 -// Output is in Q.128 format. 
-func lnBetweenOneAndTwo(x big.Int) big.Int { - // ln is approximated by rational function - // polynomials of the rational function are evaluated using Horner's method - num := Polyval(lnNumCoef, x.Int) // Q.128 - denom := Polyval(lnDenomCoef, x.Int) // Q.128 - - num = num.Lsh(num, Precision128) // Q.128 => Q.256 - return big.NewFromGo(num.Div(num, denom)) // Q.256 / Q.128 => Q.128 -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/parse.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/parse.go deleted file mode 100644 index aee6c16..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/parse.go +++ /dev/null @@ -1,18 +0,0 @@ -package math - -import "math/big" - -// Parse a slice of strings (representing integers in decimal) -// Convention: this function is to be applied to strings representing Q.128 fixed-point numbers, and thus returns numbers in binary Q.128 representation -func Parse(coefs []string) []*big.Int { - out := make([]*big.Int, len(coefs)) - for i, coef := range coefs { - c, ok := new(big.Int).SetString(coef, 10) - if !ok { - panic("could not parse q128 parameter") - } - // << 128 (Q.0 to Q.128) >> 128 to transform integer params to coefficients - out[i] = c - } - return out -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/polyval.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/polyval.go deleted file mode 100644 index 81412d6..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/math/polyval.go +++ /dev/null @@ -1,22 +0,0 @@ -package math - -import "math/big" - -// note: all coefficients for which Polyval is used would need to be updated if this precision changes -const Precision128 = 128 - -// polyval evaluates a polynomial given by coefficients `p` in Q.128 format -// at point `x` in Q.128 format. Output is in Q.128. -// Coefficients should be ordered from the highest order coefficient to the lowest. 
-func Polyval(p []*big.Int, x *big.Int) *big.Int { - // evaluation using Horner's method - res := new(big.Int).Set(p[0]) // Q.128 - tmp := new(big.Int) // big.Int.Mul doesn't like when input is reused as output - for _, c := range p[1:] { - tmp = tmp.Mul(res, x) // Q.128 * Q.128 => Q.256 - res = res.Rsh(tmp, Precision128) // Q.256 >> 128 => Q.128 - res = res.Add(res, c) - } - - return res -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing/alpha_beta_filter.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing/alpha_beta_filter.go deleted file mode 100644 index c7c352e..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing/alpha_beta_filter.go +++ /dev/null @@ -1,101 +0,0 @@ -package smoothing - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v9/util/math" -) - -var ( - DefaultAlpha big.Int // Q.128 value of 9.25e-4 - DefaultBeta big.Int // Q.128 value of 2.84e-7 - - ExtrapolatedCumSumRatioEpsilon big.Int // Q.128 value of 2^-50 -) - -func init() { - // Alpha Beta Filter constants - constStrs := []string{ - "314760000000000000000000000000000000", // DefaultAlpha - "96640100000000000000000000000000", // DefaultBeta - "302231454903657293676544", // Epsilon - - } - constBigs := math.Parse(constStrs) - DefaultAlpha = big.NewFromGo(constBigs[0]) - DefaultBeta = big.NewFromGo(constBigs[1]) - ExtrapolatedCumSumRatioEpsilon = big.NewFromGo(constBigs[2]) - -} - -//Alpha Beta Filter "position" (value) and "velocity" (rate of change of value) estimates -//Estimates are in Q.128 format -type FilterEstimate struct { - PositionEstimate big.Int // Q.128 - VelocityEstimate big.Int // Q.128 -} - -// Returns the Q.0 position estimate of the filter -func Estimate(fe *FilterEstimate) big.Int { - return big.Rsh(fe.PositionEstimate, math.Precision128) // Q.128 => Q.0 -} - -// Create a new filter estimate given two Q.0 format ints. -func NewEstimate(position, velocity big.Int) FilterEstimate { - return FilterEstimate{ - PositionEstimate: big.Lsh(position, math.Precision128), // Q.0 => Q.128 - VelocityEstimate: big.Lsh(velocity, math.Precision128), // Q.0 => Q.128 - } -} - -// Extrapolate the CumSumRatio given two filters. 
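To make the Q.128 convention used by Parse and Polyval above concrete, here is a self-contained sketch using only math/big: a value x is stored as the integer x*2^128, the product of two Q.128 numbers is shifted right by 128 bits to return to Q.128, and Horner's method keeps every intermediate in that format. The helper names are invented for illustration.

package main

import (
	"fmt"
	"math/big"
)

const precision128 = 128

// toQ128 converts a small rational num/den into Q.128 fixed point.
func toQ128(num, den int64) *big.Int {
	x := new(big.Int).Lsh(big.NewInt(num), precision128)
	return x.Div(x, big.NewInt(den))
}

// polyvalQ128 mirrors the Horner evaluation above: coefficients and x in Q.128, result in Q.128.
func polyvalQ128(p []*big.Int, x *big.Int) *big.Int {
	res := new(big.Int).Set(p[0])
	tmp := new(big.Int)
	for _, c := range p[1:] {
		tmp.Mul(res, x)            // Q.128 * Q.128 => Q.256
		res.Rsh(tmp, precision128) // back to Q.128
		res.Add(res, c)
	}
	return res
}

func main() {
	// p(t) = t^2 + 1, evaluated at t = 1.5: expect 3.25.
	one := toQ128(1, 1)
	zero := big.NewInt(0)
	x := toQ128(3, 2)

	resQ128 := polyvalQ128([]*big.Int{one, zero, one}, x)

	// Convert back to a float just for display.
	f := new(big.Float).SetInt(resQ128)
	f.Quo(f, new(big.Float).SetInt(new(big.Int).Lsh(big.NewInt(1), precision128)))
	fmt.Println(f.Text('f', 4)) // 3.2500
}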
-// Output is in Q.128 format -func ExtrapolatedCumSumOfRatio(delta abi.ChainEpoch, relativeStart abi.ChainEpoch, estimateNum, estimateDenom FilterEstimate) big.Int { - deltaT := big.Lsh(big.NewInt(int64(delta)), math.Precision128) // Q.0 => Q.128 - t0 := big.Lsh(big.NewInt(int64(relativeStart)), math.Precision128) // Q.0 => Q.128 - // Renaming for ease of following spec and clarity - position1 := estimateNum.PositionEstimate - position2 := estimateDenom.PositionEstimate - velocity1 := estimateNum.VelocityEstimate - velocity2 := estimateDenom.VelocityEstimate - - squaredVelocity2 := big.Mul(velocity2, velocity2) // Q.128 * Q.128 => Q.256 - squaredVelocity2 = big.Rsh(squaredVelocity2, math.Precision128) // Q.256 => Q.128 - - if squaredVelocity2.GreaterThan(ExtrapolatedCumSumRatioEpsilon) { - x2a := big.Mul(t0, velocity2) // Q.128 * Q.128 => Q.256 - x2a = big.Rsh(x2a, math.Precision128) // Q.256 => Q.128 - x2a = big.Sum(position2, x2a) - - x2b := big.Mul(deltaT, velocity2) // Q.128 * Q.128 => Q.256 - x2b = big.Rsh(x2b, math.Precision128) // Q.256 => Q.128 - x2b = big.Sum(x2a, x2b) - - x2a = math.Ln(x2a) // Q.128 - x2b = math.Ln(x2b) // Q.128 - - m1 := big.Sub(x2b, x2a) - m1 = big.Mul(velocity2, big.Mul(position1, m1)) // Q.128 * Q.128 * Q.128 => Q.384 - m1 = big.Rsh(m1, math.Precision128) //Q.384 => Q.256 - - m2L := big.Sub(x2a, x2b) - m2L = big.Mul(position2, m2L) // Q.128 * Q.128 => Q.256 - m2R := big.Mul(velocity2, deltaT) // Q.128 * Q.128 => Q.256 - m2 := big.Sum(m2L, m2R) - m2 = big.Mul(velocity1, m2) // Q.256 => Q.384 - m2 = big.Rsh(m2, math.Precision128) //Q.384 => Q.256 - - return big.Div(big.Sum(m1, m2), squaredVelocity2) // Q.256 / Q.128 => Q.128 - - } - - halfDeltaT := big.Rsh(deltaT, 1) // Q.128 / Q.0 => Q.128 - x1m := big.Mul(velocity1, big.Sum(t0, halfDeltaT)) // Q.128 * Q.128 => Q.256 - x1m = big.Rsh(x1m, math.Precision128) // Q.256 => Q.128 - x1m = big.Add(position1, x1m) - - cumsumRatio := big.Mul(x1m, deltaT) // Q.128 * Q.128 => Q.256 - cumsumRatio = big.Div(cumsumRatio, position2) // Q.256 / Q.128 => Q.128 - return cumsumRatio - -} diff --git a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing/cbor_gen.go deleted file mode 100644 index c7742ef..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing/cbor_gen.go +++ /dev/null @@ -1,75 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
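A small sketch of how the Q.128 FilterEstimate fields above combine, using only functions that appear in this diff (big.Lsh, big.Rsh, big.Mul, big.Sum and math.Precision128). It extrapolates a single position by velocity times delta, a much simpler operation than the ratio integral computed by ExtrapolatedCumSumOfRatio; the numbers are invented for illustration.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin/v9/util/math"
	"github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing"
)

func main() {
	// Filter with position 1000 and velocity 5 per epoch, both given in Q.0.
	fe := smoothing.NewEstimate(big.NewInt(1000), big.NewInt(5))

	// Extrapolate the position 10 epochs ahead: pos + vel*delta, all in Q.128.
	delta := big.Lsh(big.NewInt(10), math.Precision128)                      // Q.0 => Q.128
	drift := big.Rsh(big.Mul(fe.VelocityEstimate, delta), math.Precision128) // Q.256 => Q.128
	future := big.Sum(fe.PositionEstimate, drift)

	// Back to Q.0 for display, mirroring Estimate above.
	fmt.Println(big.Rsh(future, math.Precision128)) // 1050
}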
- -package smoothing - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufFilterEstimate = []byte{130} - -func (t *FilterEstimate) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufFilterEstimate); err != nil { - return err - } - - // t.PositionEstimate (big.Int) (struct) - if err := t.PositionEstimate.MarshalCBOR(w); err != nil { - return err - } - - // t.VelocityEstimate (big.Int) (struct) - if err := t.VelocityEstimate.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *FilterEstimate) UnmarshalCBOR(r io.Reader) error { - *t = FilterEstimate{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PositionEstimate (big.Int) (struct) - - { - - if err := t.PositionEstimate.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PositionEstimate: %w", err) - } - - } - // t.VelocityEstimate (big.Int) (struct) - - { - - if err := t.VelocityEstimate.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.VelocityEstimate: %w", err) - } - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/cbor/cbor.go b/vendor/github.com/filecoin-project/go-state-types/cbor/cbor.go deleted file mode 100644 index 86f70df..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/cbor/cbor.go +++ /dev/null @@ -1,18 +0,0 @@ -package cbor - -import "io" - -// These interfaces are intended to match those from whyrusleeping/cbor-gen, such that code generated from that -// system is automatically usable here (but not mandatory). -type Marshaler interface { - MarshalCBOR(w io.Writer) error -} - -type Unmarshaler interface { - UnmarshalCBOR(r io.Reader) error -} - -type Er interface { - Marshaler - Unmarshaler -} diff --git a/vendor/github.com/filecoin-project/go-state-types/crypto/randomness.go b/vendor/github.com/filecoin-project/go-state-types/crypto/randomness.go deleted file mode 100644 index 0e0e8f5..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/crypto/randomness.go +++ /dev/null @@ -1,16 +0,0 @@ -package crypto - -// Specifies a domain for randomness generation. 
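The Marshaler and Unmarshaler interfaces above can also be satisfied by hand, without cbor-gen's code generation. Here is a toy sketch using only the cbor-gen helpers that already appear throughout this diff; the Counter type is invented for illustration.

package main

import (
	"bytes"
	"fmt"
	"io"

	cbg "github.com/whyrusleeping/cbor-gen"
)

// Counter encodes as a single unsigned CBOR integer, so *Counter satisfies
// both interfaces above.
type Counter uint64

func (c Counter) MarshalCBOR(w io.Writer) error {
	scratch := make([]byte, 9)
	return cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(c))
}

func (c *Counter) UnmarshalCBOR(r io.Reader) error {
	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)
	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	if err != nil {
		return err
	}
	if maj != cbg.MajUnsignedInt {
		return fmt.Errorf("wrong type for uint64 field")
	}
	*c = Counter(extra)
	return nil
}

func main() {
	var buf bytes.Buffer
	in := Counter(42)
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}
	var out Counter
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out) // 42
}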
-type DomainSeparationTag int64 - -const ( - DomainSeparationTag_TicketProduction DomainSeparationTag = 1 + iota - DomainSeparationTag_ElectionProofProduction - DomainSeparationTag_WinningPoStChallengeSeed - DomainSeparationTag_WindowedPoStChallengeSeed - DomainSeparationTag_SealRandomness - DomainSeparationTag_InteractiveSealChallengeSeed - DomainSeparationTag_WindowedPoStDeadlineAssignment - DomainSeparationTag_MarketDealCronSeed - DomainSeparationTag_PoStChainCommit -) diff --git a/vendor/github.com/filecoin-project/go-state-types/crypto/signature.go b/vendor/github.com/filecoin-project/go-state-types/crypto/signature.go deleted file mode 100644 index f856980..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/crypto/signature.go +++ /dev/null @@ -1,125 +0,0 @@ -package crypto - -import ( - "bytes" - "fmt" - "io" - "math" - - cbg "github.com/whyrusleeping/cbor-gen" -) - -type SigType byte - -const ( - SigTypeUnknown = SigType(math.MaxUint8) - - SigTypeSecp256k1 = SigType(iota) - SigTypeBLS -) - -func (t SigType) Name() (string, error) { - switch t { - case SigTypeUnknown: - return "unknown", nil - case SigTypeSecp256k1: - return "secp256k1", nil - case SigTypeBLS: - return "bls", nil - default: - return "", fmt.Errorf("invalid signature type: %d", t) - } -} - -const SignatureMaxLength = 200 - -type Signature struct { - Type SigType - Data []byte -} - -func (s *Signature) Equals(o *Signature) bool { - if s == nil || o == nil { - return s == o - } - return s.Type == o.Type && bytes.Equal(s.Data, o.Data) -} - -func (s *Signature) MarshalCBOR(w io.Writer) error { - if s == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - header := cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(s.Data)+1)) - if _, err := w.Write(header); err != nil { - return err - } - if _, err := w.Write([]byte{byte(s.Type)}); err != nil { - return err - } - if _, err := w.Write(s.Data); err != nil { - return err - } - return nil -} - -func (s *Signature) UnmarshalCBOR(br io.Reader) error { - maj, l, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - - if maj != cbg.MajByteString { - return fmt.Errorf("not a byte string") - } - if l > SignatureMaxLength { - return fmt.Errorf("string too long") - } - if l == 0 { - return fmt.Errorf("string empty") - } - buf := make([]byte, l) - if _, err = io.ReadFull(br, buf); err != nil { - return err - } - switch SigType(buf[0]) { - default: - return fmt.Errorf("invalid signature type in cbor input: %d", buf[0]) - case SigTypeSecp256k1: - s.Type = SigTypeSecp256k1 - case SigTypeBLS: - s.Type = SigTypeBLS - } - s.Data = buf[1:] - return nil -} - -func (s *Signature) MarshalBinary() ([]byte, error) { - bs := make([]byte, len(s.Data)+1) - bs[0] = byte(s.Type) - copy(bs[1:], s.Data) - return bs, nil -} - -func (s *Signature) UnmarshalBinary(bs []byte) error { - if len(bs) > SignatureMaxLength { - return fmt.Errorf("invalid signature bytes, too long (%d)", len(bs)) - } - if len(bs) == 0 { - return fmt.Errorf("invalid signature bytes of length 0") - } - switch SigType(bs[0]) { - default: - // Do not error during unmarshal but leave a standard value. - // unmarshal(marshal(zero valued sig)) is valuable for test - // and type needs to be checked by caller anyway. 
- s.Type = SigTypeUnknown - case SigTypeSecp256k1: - s.Type = SigTypeSecp256k1 - case SigTypeBLS: - s.Type = SigTypeBLS - } - s.Data = bs[1:] - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/dline/deadline.go b/vendor/github.com/filecoin-project/go-state-types/dline/deadline.go deleted file mode 100644 index 06bc107..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/dline/deadline.go +++ /dev/null @@ -1,143 +0,0 @@ -package dline - -import "github.com/filecoin-project/go-state-types/abi" - -// Deadline calculations with respect to a current epoch. -// "Deadline" refers to the window during which proofs may be submitted. -// Windows are non-overlapping ranges [Open, Close), but the challenge epoch for a window occurs before -// the window opens. -// The current epoch may not necessarily lie within the deadline or proving period represented here. -type Info struct { - // Deadline parameters - CurrentEpoch abi.ChainEpoch // Epoch at which this info was calculated. - PeriodStart abi.ChainEpoch // First epoch of the proving period (<= CurrentEpoch). - Index uint64 // A deadline index, in [0..d.WPoStProvingPeriodDeadlines) unless period elapsed. - Open abi.ChainEpoch // First epoch from which a proof may be submitted (>= CurrentEpoch). - Close abi.ChainEpoch // First epoch from which a proof may no longer be submitted (>= Open). - Challenge abi.ChainEpoch // Epoch at which to sample the chain for challenge (< Open). - FaultCutoff abi.ChainEpoch // First epoch at which a fault declaration is rejected (< Open). - - // Protocol parameters - WPoStPeriodDeadlines uint64 - WPoStProvingPeriod abi.ChainEpoch // the number of epochs in a window post proving period - WPoStChallengeWindow abi.ChainEpoch - WPoStChallengeLookback abi.ChainEpoch - FaultDeclarationCutoff abi.ChainEpoch -} - -// Whether the proving period has begun. -func (d *Info) PeriodStarted() bool { - return d.CurrentEpoch >= d.PeriodStart -} - -// Whether the proving period has elapsed. -func (d *Info) PeriodElapsed() bool { - return d.CurrentEpoch >= d.NextPeriodStart() -} - -// The last epoch in the proving period. -func (d *Info) PeriodEnd() abi.ChainEpoch { - return d.PeriodStart + d.WPoStProvingPeriod - 1 -} - -// The first epoch in the next proving period. -func (d *Info) NextPeriodStart() abi.ChainEpoch { - return d.PeriodStart + d.WPoStProvingPeriod -} - -// Whether the current deadline is currently open. -func (d *Info) IsOpen() bool { - return d.CurrentEpoch >= d.Open && d.CurrentEpoch < d.Close -} - -// Whether the current deadline has already closed. -func (d *Info) HasElapsed() bool { - return d.CurrentEpoch >= d.Close -} - -// The last epoch during which a proof may be submitted. -func (d *Info) Last() abi.ChainEpoch { - return d.Close - 1 -} - -// Epoch at which the subsequent deadline opens. -func (d *Info) NextOpen() abi.ChainEpoch { - return d.Close -} - -// Whether the deadline's fault cutoff has passed. -func (d *Info) FaultCutoffPassed() bool { - return d.CurrentEpoch >= d.FaultCutoff -} - -// Returns the next instance of this deadline that has not yet elapsed. -func (d *Info) NextNotElapsed() *Info { - // If the deadline hasn't elapsed, do nothing. - if !d.HasElapsed() { - return d - } - - // find a nearby period start - // 1. first, find our period's offset from the "global" period - offset := d.PeriodStart % d.WPoStProvingPeriod - // handle negative period starts just in case. - if offset < 0 { - offset += d.WPoStProvingPeriod - } - // 2. 
determine the global period index. - globalPeriod := (d.CurrentEpoch / d.WPoStProvingPeriod) - // 3. Determine our next period start. - periodStart := globalPeriod*d.WPoStProvingPeriod + offset - - // Backtrack so the period starts before the current epoch. This should usually run 0 or 1 times. - for periodStart > d.CurrentEpoch { - periodStart -= d.WPoStProvingPeriod - } - - // If the next deadline opens at or after the current epoch, move to the next pp. - if d.CurrentEpoch >= periodStart+abi.ChainEpoch(d.Index+1)*d.WPoStChallengeWindow { - periodStart += d.WPoStProvingPeriod - } - - return NewInfo(periodStart, d.Index, d.CurrentEpoch, d.WPoStPeriodDeadlines, d.WPoStProvingPeriod, d.WPoStChallengeWindow, d.WPoStChallengeLookback, d.FaultDeclarationCutoff) -} - -// Returns deadline-related calculations for a deadline in some proving period and the current epoch. -func NewInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch, wPoStPeriodDeadlines uint64, wPoStProvingPeriod, wPoStChallengeWindow, wPoStChallengeLookback, faultDeclarationCutoff abi.ChainEpoch) *Info { - if deadlineIdx < wPoStPeriodDeadlines { - deadlineOpen := periodStart + (abi.ChainEpoch(deadlineIdx) * wPoStChallengeWindow) - return &Info{ - CurrentEpoch: currEpoch, - PeriodStart: periodStart, - Index: deadlineIdx, - Open: deadlineOpen, - Close: deadlineOpen + wPoStChallengeWindow, - Challenge: deadlineOpen - wPoStChallengeLookback, - FaultCutoff: deadlineOpen - faultDeclarationCutoff, - // parameters - WPoStPeriodDeadlines: wPoStPeriodDeadlines, - WPoStProvingPeriod: wPoStProvingPeriod, - WPoStChallengeWindow: wPoStChallengeWindow, - WPoStChallengeLookback: wPoStChallengeLookback, - FaultDeclarationCutoff: faultDeclarationCutoff, - } - } else { - // Return deadline info for a no-duration deadline immediately after the last real one. - afterLastDeadline := periodStart + wPoStProvingPeriod - return &Info{ - CurrentEpoch: currEpoch, - PeriodStart: periodStart, - Index: deadlineIdx, - Open: afterLastDeadline, - Close: afterLastDeadline, - Challenge: afterLastDeadline, - FaultCutoff: 0, - // parameters - WPoStPeriodDeadlines: wPoStPeriodDeadlines, - WPoStProvingPeriod: wPoStProvingPeriod, - WPoStChallengeWindow: wPoStChallengeWindow, - WPoStChallengeLookback: wPoStChallengeLookback, - FaultDeclarationCutoff: faultDeclarationCutoff, - } - } -} diff --git a/vendor/github.com/filecoin-project/go-state-types/exitcode/common.go b/vendor/github.com/filecoin-project/go-state-types/exitcode/common.go deleted file mode 100644 index 4ba4536..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/exitcode/common.go +++ /dev/null @@ -1,29 +0,0 @@ -package exitcode - -// Common error codes that may be shared by different actors. -// Actors may also define their own codes, including redefining these values. - -const ( - // ErrIllegalArgument Indicates a method parameter is invalid. - ErrIllegalArgument = FirstActorErrorCode + iota - // ErrNotFound Indicates a requested resource does not exist. - ErrNotFound - // ErrForbidden Indicates an action is disallowed. - ErrForbidden - // ErrInsufficientFunds Indicates a balance of funds is insufficient. - ErrInsufficientFunds - // ErrIllegalState Indicates an actor's internal state is invalid. - ErrIllegalState - // ErrSerialization Indicates de/serialization failure within actor code. - ErrSerialization - // ErrUnhandledMessage Indicates the actor cannot handle this message. 
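A sketch of the deadline arithmetic above, calling NewInfo directly. The 48 deadlines of 60 epochs (a 2880-epoch proving period), the 20-epoch challenge lookback and the 70-epoch fault cutoff are assumed mainnet-style parameters, not values taken from this diff.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/dline"
)

func main() {
	// Assumed window PoSt parameters: 48 deadlines of 60 epochs each.
	const (
		periodDeadlines   = uint64(48)
		provingPeriod     = abi.ChainEpoch(2880)
		challengeWindow   = abi.ChainEpoch(60)
		challengeLookback = abi.ChainEpoch(20)
		faultCutoff       = abi.ChainEpoch(70)
	)

	periodStart := abi.ChainEpoch(10000)
	current := abi.ChainEpoch(10075) // 75 epochs into the period, inside deadline index 1

	di := dline.NewInfo(periodStart, 1, current,
		periodDeadlines, provingPeriod, challengeWindow, challengeLookback, faultCutoff)

	fmt.Println(di.Open, di.Close, di.IsOpen(), di.FaultCutoffPassed())
	// 10060 10120 true true
}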
- ErrUnhandledMessage - // ErrUnspecified Indicates the actor failed with an unspecified error. - ErrUnspecified - // ErrAssertionFailed Indicates the actor failed a user-level assertion - ErrAssertionFailed - - // Common error codes stop here. If you define a common error code above - // this value it will have conflicting interpretations - FirstActorSpecificExitCode = ExitCode(32) -) diff --git a/vendor/github.com/filecoin-project/go-state-types/exitcode/exitcode.go b/vendor/github.com/filecoin-project/go-state-types/exitcode/exitcode.go deleted file mode 100644 index d604909..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/exitcode/exitcode.go +++ /dev/null @@ -1,96 +0,0 @@ -package exitcode - -import ( - "errors" - "fmt" - "strconv" - - "golang.org/x/xerrors" -) - -type ExitCode int64 - -func (x ExitCode) IsSuccess() bool { - return x == Ok -} - -func (x ExitCode) IsError() bool { - return !x.IsSuccess() -} - -// Whether an exit code indicates a message send failure. -// A send failure means that the caller's CallSeqNum is not incremented and the caller has not paid -// gas fees for the message (because the caller doesn't exist or can't afford it). -// A receipt with send failure does not indicate that the message (or another one carrying the same CallSeqNum) -// could not apply in the future, against a different state. -func (x ExitCode) IsSendFailure() bool { - return x == SysErrSenderInvalid || x == SysErrSenderStateInvalid -} - -// A non-canonical string representation for human inspection. -func (x ExitCode) String() string { - name, ok := names[x] - if ok { - return fmt.Sprintf("%s(%d)", name, x) - } - return strconv.FormatInt(int64(x), 10) -} - -// Implement error to trigger Go compiler checking of exit code return values. -func (x ExitCode) Error() string { - return x.String() -} - -// Wrapf attaches an error message, and possibly an error, to the exit -// code. -// -// err := ErrIllegalArgument.Wrapf("my description: %w", err) -// exitcode.Unwrap(exitcode.ErrIllegalState, err) == exitcode.ErrIllegalArgument -func (x ExitCode) Wrapf(msg string, args ...interface{}) error { - return &wrapped{ - exitCode: x, - cause: xerrors.Errorf(msg, args...), - } -} - -type wrapped struct { - exitCode ExitCode - cause error -} - -func (w *wrapped) String() string { - return w.Error() -} - -func (w *wrapped) Error() string { - // Don't include the exit code. That will be handled by the runtime and - // this error has likely been wrapped multiple times. - return w.cause.Error() -} - -// implements the interface required by errors.As -func (w *wrapped) As(target interface{}) bool { - return errors.As(w.exitCode, target) || errors.As(w.cause, target) -} - -// implements the interface required by errors.Is -func (w *wrapped) Is(target error) bool { - if _, ok := target.(ExitCode); ok { - // If the target is an exit code, make sure we shadow lower exit - // codes. - return w.exitCode == target - } - return errors.Is(w.cause, target) -} - -// Unwrap extracts an exit code from an error, defaulting to the passed default -// exit code. 
-// -// err := ErrIllegalState.WithContext("my description: %w", err) -// exitcode.Unwrap(exitcode.ErrIllegalState, err) == exitcode.ErrIllegalArgument -func Unwrap(err error, defaultExitCode ExitCode) (code ExitCode) { - if errors.As(err, &code) { - return code - } - return defaultExitCode -} diff --git a/vendor/github.com/filecoin-project/go-state-types/exitcode/reserved.go b/vendor/github.com/filecoin-project/go-state-types/exitcode/reserved.go deleted file mode 100644 index dba55e9..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/exitcode/reserved.go +++ /dev/null @@ -1,82 +0,0 @@ -package exitcode - -// The system error codes are reserved for use by the runtime. -// No actor may use one explicitly. Correspondingly, no runtime invocation should abort with an exit -// code outside this list. -// We could move these definitions out of this package and into the runtime spec. -const ( - Ok = ExitCode(0) - - // Indicates that the actor identified as the sender of a message is not valid as a message sender: - // - not present in the state tree - // - not an account actor (for top-level messages) - // - code CID is not found or invalid - // (not found in the state tree, not an account, has no code). - SysErrSenderInvalid = ExitCode(1) - - // Indicates that the sender of a message is not in a state to send the message: - // - invocation out of sequence (mismatched CallSeqNum) - // - insufficient funds to cover execution - SysErrSenderStateInvalid = ExitCode(2) - - // Indicates failure to find a method in an actor. - SysErrInvalidMethod = ExitCode(3) - - // Indicates the message receiver trapped (panicked - SysErrIllegalInstruction = ExitCode(4) - - // Indicates that the receiver of a message is not valid (and cannot be implicitly created). - SysErrInvalidReceiver = ExitCode(5) - - // Indicates that a message sender has insufficient balance for the value being sent. - // Note that this is distinct from SysErrSenderStateInvalid when a top-level sender can't cover - // value transfer + gas. This code is only expected to come from inter-actor sends. - SysErrInsufficientFunds = ExitCode(6) - - // Indicates message execution (including subcalls) used more gas than the specified limit. - SysErrOutOfGas = ExitCode(7) - - // Indicates message execution is forbidden for the caller by runtime caller validation. - SysErrForbidden = ExitCode(8) - - // Indicates actor code performed a disallowed operation. Disallowed operations include: - // - mutating state outside of a state acquisition block - // - failing to invoke caller validation - // - aborting with a reserved exit code (including success or a system error). - SysErrorIllegalActor = ExitCode(9) - - // Indicates an invalid argument passed to a runtime method. - SysErrorIllegalArgument = ExitCode(10) - - // Indicates the actor returned a block handle that doesn't exist - SysErrMissingReturn = ExitCode(11) - - // Unused - SysErrReserved3 = ExitCode(12) - SysErrReserved4 = ExitCode(13) - SysErrReserved5 = ExitCode(14) - SysErrReserved6 = ExitCode(15) -) - -// The initial range of exit codes is reserved for system errors. -// Actors may define codes starting with this one. 
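A short sketch of the Wrapf/Unwrap pattern documented above. The load helper and its error text are invented for illustration; the exit codes themselves (ErrNotFound, ErrIllegalState) are the ones defined in this diff.

package main

import (
	"errors"
	"fmt"

	"github.com/filecoin-project/go-state-types/exitcode"
)

func load() error {
	// Attach an exit code to a lower-level failure.
	return exitcode.ErrNotFound.Wrapf("loading claim: %w", errors.New("cid not in store"))
}

func main() {
	err := load()

	// Unwrap recovers the code, falling back to the given default if none is attached.
	code := exitcode.Unwrap(err, exitcode.ErrIllegalState)
	fmt.Println(code, errors.Is(err, exitcode.ErrNotFound))
	// 17 true (ErrNotFound is FirstActorErrorCode+1 and has no name entry, so it prints as 17)
}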
-const FirstActorErrorCode = ExitCode(16) - -var names = map[ExitCode]string{ - Ok: "Ok", - SysErrSenderInvalid: "SysErrSenderInvalid", - SysErrSenderStateInvalid: "SysErrSenderStateInvalid", - SysErrInvalidMethod: "SysErrInvalidMethod", - SysErrIllegalInstruction: "SysErrIllegalInstruction", - SysErrInvalidReceiver: "SysErrInvalidReceiver", - SysErrInsufficientFunds: "SysErrInsufficientFunds", - SysErrOutOfGas: "SysErrOutOfGas", - SysErrForbidden: "SysErrForbidden", - SysErrorIllegalActor: "SysErrorIllegalActor", - SysErrorIllegalArgument: "SysErrorIllegalArgument", - SysErrMissingReturn: "SysErrMissingReturn", - SysErrReserved3: "SysErrReserved3", - SysErrReserved4: "SysErrReserved4", - SysErrReserved5: "SysErrReserved5", - SysErrReserved6: "SysErrReserved6", -} diff --git a/vendor/github.com/filecoin-project/go-state-types/manifest/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/manifest/cbor_gen.go deleted file mode 100644 index d596609..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/manifest/cbor_gen.go +++ /dev/null @@ -1,165 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package manifest - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufManifest = []byte{130} - -func (t *Manifest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufManifest); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Version (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { - return err - } - - // t.Data (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Data); err != nil { - return xerrors.Errorf("failed to write cid field t.Data: %w", err) - } - - return nil -} - -func (t *Manifest) UnmarshalCBOR(r io.Reader) error { - *t = Manifest{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Version (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Version = uint64(extra) - - } - // t.Data (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Data: %w", err) - } - - t.Data = c - - } - return nil -} - -var lengthBufManifestEntry = []byte{130} - -func (t *ManifestEntry) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufManifestEntry); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Name (string) (string) - if len(t.Name) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Name was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Name))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Name)); err != nil { - return err - } - - // t.Code (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Code); err != nil { - return xerrors.Errorf("failed to write cid field t.Code: %w", err) 
- } - - return nil -} - -func (t *ManifestEntry) UnmarshalCBOR(r io.Reader) error { - *t = ManifestEntry{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Name (string) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Name = string(sval) - } - // t.Code (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Code: %w", err) - } - - t.Code = c - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/manifest/manifest.go b/vendor/github.com/filecoin-project/go-state-types/manifest/manifest.go deleted file mode 100644 index 16f35e0..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/manifest/manifest.go +++ /dev/null @@ -1,111 +0,0 @@ -package manifest - -import ( - "context" - "fmt" - "io" - - "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" - - "github.com/ipfs/go-cid" - - cbg "github.com/whyrusleeping/cbor-gen" -) - -type Manifest struct { - Version uint64 // this is really u32, but cbor-gen can't deal with it - Data cid.Cid - - entries map[string]cid.Cid -} - -type ManifestEntry struct { - Name string - Code cid.Cid -} - -type ManifestData struct { - Entries []ManifestEntry -} - -func (m *Manifest) Load(ctx context.Context, store adt.Store) error { - if m.Version != 1 { - return fmt.Errorf("unknown manifest version %d", m.Version) - } - - data := ManifestData{} - if err := store.Get(ctx, m.Data, &data); err != nil { - return err - } - - m.entries = make(map[string]cid.Cid) - for _, e := range data.Entries { - m.entries[e.Name] = e.Code - } - - return nil -} - -func (m *Manifest) Get(name string) (cid.Cid, bool) { - c, ok := m.entries[name] - return c, ok -} - -// this is a flat tuple, so we need to write these by hand -func (d *ManifestData) UnmarshalCBOR(r io.Reader) error { - *d = ManifestData{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("too many manifest entries") - } - - entries := int(extra) - d.Entries = make([]ManifestEntry, 0, entries) - - for i := 0; i < entries; i++ { - entry := ManifestEntry{} - if err := entry.UnmarshalCBOR(r); err != nil { - return fmt.Errorf("error unmarsnalling manifest entry: %w", err) - } - - d.Entries = append(d.Entries, entry) - } - - return nil -} - -func (d *ManifestData) MarshalCBOR(w io.Writer) error { - if d == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - scratch := make([]byte, 9) - - if len(d.Entries) > cbg.MaxLength { - return fmt.Errorf("too many manifest entries") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(d.Entries))); err != nil { - return err - } - - for _, v := range d.Entries { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/network/version.go b/vendor/github.com/filecoin-project/go-state-types/network/version.go deleted file mode 100644 index c3759b2..0000000 --- 
a/vendor/github.com/filecoin-project/go-state-types/network/version.go +++ /dev/null @@ -1,31 +0,0 @@ -package network - -import "math" - -// Enumeration of network upgrades where actor behaviour can change (without necessarily -// vendoring and versioning the whole actor codebase). -type Version uint - -const ( - Version0 = Version(iota) // genesis (specs-actors v0.9.3) - Version1 // breeze (specs-actors v0.9.7) - Version2 // smoke (specs-actors v0.9.8) - Version3 // ignition (specs-actors v0.9.11) - Version4 // actors v2 (specs-actors v2.0.3) - Version5 // tape (specs-actors v2.1.0) - Version6 // kumquat (specs-actors v2.2.0) - Version7 // calico (specs-actors v2.3.2) - Version8 // persian (post-2.3.2 behaviour transition) - Version9 // orange (post-2.3.2 behaviour transition) - Version10 // trust (specs-actors v3.0.1) - Version11 // norwegian (specs-actors v3.1.0) - Version12 // turbo (specs-actors v4.0.0) - Version13 // hyperdrive (specs-actors v5.0.1) - Version14 // chocolate (specs-actors v6.0.0) - Version15 // ??? - Version16 // ??? - Version17 // ??? - - // VersionMax is the maximum version number - VersionMax = Version(math.MaxUint32) -) diff --git a/vendor/github.com/filecoin-project/go-state-types/proof/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/proof/cbor_gen.go deleted file mode 100644 index 8816a82..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/proof/cbor_gen.go +++ /dev/null @@ -1,1002 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package proof - -import ( - "fmt" - "io" - - abi "github.com/filecoin-project/go-state-types/abi" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufPoStProof = []byte{130} - -func (t *PoStProof) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPoStProof); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.PoStProof (abi.RegisteredPoStProof) (int64) - if t.PoStProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PoStProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.PoStProof-1)); err != nil { - return err - } - } - - // t.ProofBytes ([]uint8) (slice) - if len(t.ProofBytes) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.ProofBytes was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ProofBytes))); err != nil { - return err - } - - if _, err := w.Write(t.ProofBytes[:]); err != nil { - return err - } - return nil -} - -func (t *PoStProof) UnmarshalCBOR(r io.Reader) error { - *t = PoStProof{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PoStProof (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative 
oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.PoStProof = abi.RegisteredPoStProof(extraI) - } - // t.ProofBytes ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.ProofBytes: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.ProofBytes = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.ProofBytes[:]); err != nil { - return err - } - return nil -} - -var lengthBufExtendedSectorInfo = []byte{132} - -func (t *ExtendedSectorInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufExtendedSectorInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SectorKey (cid.Cid) (struct) - - if t.SectorKey == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.SectorKey); err != nil { - return xerrors.Errorf("failed to write cid field t.SectorKey: %w", err) - } - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - return nil -} - -func (t *ExtendedSectorInfo) UnmarshalCBOR(r io.Reader) error { - *t = ExtendedSectorInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SectorKey (cid.Cid) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SectorKey: %w", err) - } - - 
t.SectorKey = &c - } - - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - return nil -} - -var lengthBufSealVerifyInfo = []byte{136} - -func (t *SealVerifyInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSealVerifyInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorID (abi.SectorID) (struct) - if err := t.SectorID.MarshalCBOR(w); err != nil { - return err - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.Randomness (abi.SealRandomness) (slice) - if len(t.Randomness) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Randomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Randomness))); err != nil { - return err - } - - if _, err := w.Write(t.Randomness[:]); err != nil { - return err - } - - // t.InteractiveRandomness (abi.InteractiveSealRandomness) (slice) - if len(t.InteractiveRandomness) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.InteractiveRandomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.InteractiveRandomness))); err != nil { - return err - } - - if _, err := w.Write(t.InteractiveRandomness[:]); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Proof))); err != nil { - return err - } - - if _, err := w.Write(t.Proof[:]); err != nil { - return err - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - // t.UnsealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.UnsealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.UnsealedCID: %w", err) - } - - return nil -} - -func (t *SealVerifyInfo) UnmarshalCBOR(r io.Reader) error { - *t = SealVerifyInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 8 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case 
cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorID (abi.SectorID) (struct) - - { - - if err := t.SectorID.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.SectorID: %w", err) - } - - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.Randomness (abi.SealRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Randomness: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Randomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Randomness[:]); err != nil { - return err - } - // t.InteractiveRandomness (abi.InteractiveSealRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.InteractiveRandomness: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.InteractiveRandomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.InteractiveRandomness[:]); err != nil { - return err - } - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Proof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Proof[:]); err != nil { - return err - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - // t.UnsealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.UnsealedCID: %w", err) - } - - t.UnsealedCID = c - - } - return nil -} - -var lengthBufWindowPoStVerifyInfo = []byte{132} - -func (t *WindowPoStVerifyInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWindowPoStVerifyInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Randomness (abi.PoStRandomness) (slice) - if len(t.Randomness) > cbg.ByteArrayMaxLen { - return 
xerrors.Errorf("Byte array in field t.Randomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Randomness))); err != nil { - return err - } - - if _, err := w.Write(t.Randomness[:]); err != nil { - return err - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - if len(t.ChallengedSectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ChallengedSectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ChallengedSectors))); err != nil { - return err - } - for _, v := range t.ChallengedSectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Prover (abi.ActorID) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Prover)); err != nil { - return err - } - - return nil -} - -func (t *WindowPoStVerifyInfo) UnmarshalCBOR(r io.Reader) error { - *t = WindowPoStVerifyInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Randomness (abi.PoStRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Randomness: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Randomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Randomness[:]); err != nil { - return err - } - // t.Proofs ([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ChallengedSectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ChallengedSectors = make([]SectorInfo, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ChallengedSectors[i] = v - } - - // t.Prover (abi.ActorID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Prover = abi.ActorID(extra) - - } - return nil -} - -var lengthBufWinningPoStVerifyInfo = []byte{132} - 
-func (t *WinningPoStVerifyInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWinningPoStVerifyInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Randomness (abi.PoStRandomness) (slice) - if len(t.Randomness) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Randomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Randomness))); err != nil { - return err - } - - if _, err := w.Write(t.Randomness[:]); err != nil { - return err - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - if len(t.ChallengedSectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ChallengedSectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ChallengedSectors))); err != nil { - return err - } - for _, v := range t.ChallengedSectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Prover (abi.ActorID) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Prover)); err != nil { - return err - } - - return nil -} - -func (t *WinningPoStVerifyInfo) UnmarshalCBOR(r io.Reader) error { - *t = WinningPoStVerifyInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Randomness (abi.PoStRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Randomness: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Randomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Randomness[:]); err != nil { - return err - } - // t.Proofs ([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ChallengedSectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ChallengedSectors = make([]SectorInfo, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - 
t.ChallengedSectors[i] = v - } - - // t.Prover (abi.ActorID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Prover = abi.ActorID(extra) - - } - return nil -} - -var lengthBufSectorInfo = []byte{131} - -func (t *SectorInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - return nil -} - -func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-state-types/proof/proof_types.go b/vendor/github.com/filecoin-project/go-state-types/proof/proof_types.go deleted file mode 100644 index 722a836..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/proof/proof_types.go +++ /dev/null @@ -1,78 +0,0 @@ -package proof - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" -) - -type PoStProof struct { - PoStProof abi.RegisteredPoStProof - ProofBytes []byte -} - -type SectorInfo struct { - SealProof abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt - SectorNumber abi.SectorNumber - SealedCID cid.Cid // CommR -} - -type ExtendedSectorInfo struct { - 
SealProof abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt - SectorNumber abi.SectorNumber - SectorKey *cid.Cid - SealedCID cid.Cid // CommR -} - -type WinningPoStVerifyInfo struct { - Randomness abi.PoStRandomness - Proofs []PoStProof - ChallengedSectors []SectorInfo - Prover abi.ActorID // used to derive 32-byte prover ID -} - -// Information needed to verify a Window PoSt submitted directly to a miner actor. -type WindowPoStVerifyInfo struct { - Randomness abi.PoStRandomness - Proofs []PoStProof - ChallengedSectors []SectorInfo - Prover abi.ActorID // used to derive 32-byte prover ID -} - -type SealVerifyInfo struct { - SealProof abi.RegisteredSealProof - abi.SectorID - DealIDs []abi.DealID - Randomness abi.SealRandomness - InteractiveRandomness abi.InteractiveSealRandomness - Proof []byte - - // Safe because we get those from the miner actor - SealedCID cid.Cid `checked:"true"` // CommR - UnsealedCID cid.Cid `checked:"true"` // CommD -} - -type AggregateSealVerifyInfo struct { - Number abi.SectorNumber - Randomness abi.SealRandomness - InteractiveRandomness abi.InteractiveSealRandomness - - // Safe because we get those from the miner actor - SealedCID cid.Cid `checked:"true"` // CommR - UnsealedCID cid.Cid `checked:"true"` // CommD -} - -type AggregateSealVerifyProofAndInfos struct { - Miner abi.ActorID - SealProof abi.RegisteredSealProof - AggregateProof abi.RegisteredAggregationProof - Proof []byte - Infos []AggregateSealVerifyInfo -} - -type ReplicaUpdateInfo struct { - UpdateProofType abi.RegisteredUpdateProof - OldSealedSectorCID cid.Cid - NewSealedSectorCID cid.Cid - NewUnsealedSectorCID cid.Cid - Proof []byte -} diff --git a/vendor/github.com/filecoin-project/go-state-types/rt/actor.go b/vendor/github.com/filecoin-project/go-state-types/rt/actor.go deleted file mode 100644 index 26d6c13..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/rt/actor.go +++ /dev/null @@ -1,33 +0,0 @@ -package rt - -import ( - "github.com/filecoin-project/go-state-types/cbor" - "github.com/ipfs/go-cid" -) - -// VMActor is a concrete implementation of an actor, to be used by a Filecoin -// VM. -type VMActor interface { - // Exports returns a slice of methods exported by this actor, indexed by - // method number. Skipped/deprecated method numbers will be nil. - Exports() []interface{} - - // Code returns the code ID for this actor. - Code() cid.Cid - - // State returns a new State object for this actor. This can be used to - // decode the actor's state. - State() cbor.Er - - // NOTE: methods like "IsSingleton" are intentionally excluded from this - // interface. That way, we can add additional attributes actors in newer - // specs-actors versions, without having to update previous specs-actors - // versions. -} - -// IsSingletonActor returns true if the actor is a singleton actor (i.e., cannot -// be constructed). -func IsSingletonActor(a VMActor) bool { - s, ok := a.(interface{ IsSingleton() bool }) - return ok && s.IsSingleton() -} diff --git a/vendor/github.com/filecoin-project/go-state-types/rt/log.go b/vendor/github.com/filecoin-project/go-state-types/rt/log.go deleted file mode 100644 index d2dfff8..0000000 --- a/vendor/github.com/filecoin-project/go-state-types/rt/log.go +++ /dev/null @@ -1,18 +0,0 @@ -package rt - -// Specifies importance of message, LogLevel numbering is consistent with the uber-go/zap package. 
-type LogLevel int - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DEBUG LogLevel = iota - 1 - // InfoLevel is the default logging priority. - INFO - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WARN - // ErrorLevel logs are high-priority. If an application is running smoothly, - // it shouldn't generate any error-level logs. - ERROR -) diff --git a/vendor/github.com/filecoin-project/specs-actors/COPYRIGHT b/vendor/github.com/filecoin-project/specs-actors/COPYRIGHT deleted file mode 100644 index 6aa4b36..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/COPYRIGHT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/specs-actors/LICENSE-APACHE b/vendor/github.com/filecoin-project/specs-actors/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/filecoin-project/specs-actors/LICENSE-MIT b/vendor/github.com/filecoin-project/specs-actors/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/proof/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/runtime/proof/cbor_gen.go deleted file mode 100644 index 76b5f9d..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/proof/cbor_gen.go +++ /dev/null @@ -1,857 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package proof - -import ( - "fmt" - "io" - - abi "github.com/filecoin-project/go-state-types/abi" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufSectorInfo = []byte{131} - -func (t *SectorInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSectorInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - return nil -} - -func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { - *t = SectorInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - return nil -} - -var lengthBufSealVerifyInfo = []byte{136} - -func (t *SealVerifyInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufSealVerifyInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorID (abi.SectorID) (struct) - if err := t.SectorID.MarshalCBOR(w); err != nil { - return err - } - - // t.DealIDs ([]abi.DealID) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - 
} - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - - // t.Randomness (abi.SealRandomness) (slice) - if len(t.Randomness) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Randomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Randomness))); err != nil { - return err - } - - if _, err := w.Write(t.Randomness[:]); err != nil { - return err - } - - // t.InteractiveRandomness (abi.InteractiveSealRandomness) (slice) - if len(t.InteractiveRandomness) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.InteractiveRandomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.InteractiveRandomness))); err != nil { - return err - } - - if _, err := w.Write(t.InteractiveRandomness[:]); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Proof))); err != nil { - return err - } - - if _, err := w.Write(t.Proof[:]); err != nil { - return err - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - // t.UnsealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.UnsealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.UnsealedCID: %w", err) - } - - return nil -} - -func (t *SealVerifyInfo) UnmarshalCBOR(r io.Reader) error { - *t = SealVerifyInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 8 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorID (abi.SectorID) (struct) - - { - - if err := t.SectorID.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.SectorID: %w", err) - } - - } - // t.DealIDs ([]abi.DealID) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.DealIDs = make([]abi.DealID, extra) - } - - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != 
cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = abi.DealID(val) - } - - // t.Randomness (abi.SealRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Randomness: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Randomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Randomness[:]); err != nil { - return err - } - // t.InteractiveRandomness (abi.InteractiveSealRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.InteractiveRandomness: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.InteractiveRandomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.InteractiveRandomness[:]); err != nil { - return err - } - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Proof = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Proof[:]); err != nil { - return err - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - // t.UnsealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.UnsealedCID: %w", err) - } - - t.UnsealedCID = c - - } - return nil -} - -var lengthBufPoStProof = []byte{130} - -func (t *PoStProof) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufPoStProof); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.PoStProof (abi.RegisteredPoStProof) (int64) - if t.PoStProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PoStProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.PoStProof-1)); err != nil { - return err - } - } - - // t.ProofBytes ([]uint8) (slice) - if len(t.ProofBytes) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.ProofBytes was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ProofBytes))); err != nil { - return err - } - - if _, err := w.Write(t.ProofBytes[:]); err != nil { - return err - } - return nil -} - -func (t *PoStProof) UnmarshalCBOR(r io.Reader) error { - *t = PoStProof{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PoStProof (abi.RegisteredPoStProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - 
return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.PoStProof = abi.RegisteredPoStProof(extraI) - } - // t.ProofBytes ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.ProofBytes: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.ProofBytes = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.ProofBytes[:]); err != nil { - return err - } - return nil -} - -var lengthBufWindowPoStVerifyInfo = []byte{132} - -func (t *WindowPoStVerifyInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWindowPoStVerifyInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Randomness (abi.PoStRandomness) (slice) - if len(t.Randomness) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Randomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Randomness))); err != nil { - return err - } - - if _, err := w.Write(t.Randomness[:]); err != nil { - return err - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - if len(t.ChallengedSectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ChallengedSectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ChallengedSectors))); err != nil { - return err - } - for _, v := range t.ChallengedSectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Prover (abi.ActorID) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Prover)); err != nil { - return err - } - - return nil -} - -func (t *WindowPoStVerifyInfo) UnmarshalCBOR(r io.Reader) error { - *t = WindowPoStVerifyInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Randomness (abi.PoStRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Randomness: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Randomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Randomness[:]); err != nil { - return err - } - // t.Proofs ([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if 
err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ChallengedSectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ChallengedSectors = make([]SectorInfo, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ChallengedSectors[i] = v - } - - // t.Prover (abi.ActorID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Prover = abi.ActorID(extra) - - } - return nil -} - -var lengthBufWinningPoStVerifyInfo = []byte{132} - -func (t *WinningPoStVerifyInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufWinningPoStVerifyInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Randomness (abi.PoStRandomness) (slice) - if len(t.Randomness) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Randomness was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Randomness))); err != nil { - return err - } - - if _, err := w.Write(t.Randomness[:]); err != nil { - return err - } - - // t.Proofs ([]proof.PoStProof) (slice) - if len(t.Proofs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Proofs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { - return err - } - for _, v := range t.Proofs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - if len(t.ChallengedSectors) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.ChallengedSectors was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ChallengedSectors))); err != nil { - return err - } - for _, v := range t.ChallengedSectors { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Prover (abi.ActorID) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Prover)); err != nil { - return err - } - - return nil -} - -func (t *WinningPoStVerifyInfo) UnmarshalCBOR(r io.Reader) error { - *t = WinningPoStVerifyInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Randomness (abi.PoStRandomness) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Randomness: byte 
array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Randomness = make([]uint8, extra) - } - - if _, err := io.ReadFull(br, t.Randomness[:]); err != nil { - return err - } - // t.Proofs ([]proof.PoStProof) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Proofs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Proofs = make([]PoStProof, extra) - } - - for i := 0; i < int(extra); i++ { - - var v PoStProof - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Proofs[i] = v - } - - // t.ChallengedSectors ([]proof.SectorInfo) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.ChallengedSectors: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.ChallengedSectors = make([]SectorInfo, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SectorInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.ChallengedSectors[i] = v - } - - // t.Prover (abi.ActorID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Prover = abi.ActorID(extra) - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/proof/verify.go b/vendor/github.com/filecoin-project/specs-actors/actors/runtime/proof/verify.go deleted file mode 100644 index 121face..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/proof/verify.go +++ /dev/null @@ -1,57 +0,0 @@ -package proof - -import ( - "github.com/filecoin-project/go-state-types/abi" - "github.com/ipfs/go-cid" -) - -/// -/// Sealing -/// - -// Information needed to verify a seal proof. -type SealVerifyInfo struct { - SealProof abi.RegisteredSealProof - abi.SectorID - DealIDs []abi.DealID - Randomness abi.SealRandomness - InteractiveRandomness abi.InteractiveSealRandomness - Proof []byte - - // Safe because we get those from the miner actor - SealedCID cid.Cid `checked:"true"` // CommR - UnsealedCID cid.Cid `checked:"true"` // CommD -} - -/// -/// PoSting -/// - -// Information about a proof necessary for PoSt verification. -type SectorInfo struct { - SealProof abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt - SectorNumber abi.SectorNumber - SealedCID cid.Cid // CommR -} - -type PoStProof struct { - PoStProof abi.RegisteredPoStProof - ProofBytes []byte -} - -// Information needed to verify a Winning PoSt attached to a block header. -// Note: this is not used within the state machine, but by the consensus/election mechanisms. -type WinningPoStVerifyInfo struct { - Randomness abi.PoStRandomness - Proofs []PoStProof - ChallengedSectors []SectorInfo - Prover abi.ActorID // used to derive 32-byte prover ID -} - -// Information needed to verify a Window PoSt submitted directly to a miner actor. 
-type WindowPoStVerifyInfo struct { - Randomness abi.PoStRandomness - Proofs []PoStProof - ChallengedSectors []SectorInfo - Prover abi.ActorID // used to derive 32-byte prover ID -} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/runtime.go b/vendor/github.com/filecoin-project/specs-actors/actors/runtime/runtime.go deleted file mode 100644 index d857a9c..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/runtime.go +++ /dev/null @@ -1,266 +0,0 @@ -package runtime - -import ( - "bytes" - "context" - "io" - - "github.com/filecoin-project/go-address" - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/cbor" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/go-state-types/rt" - cid "github.com/ipfs/go-cid" - - "github.com/filecoin-project/specs-actors/actors/runtime/proof" -) - -// Runtime is the VM's internal runtime object. -// this is everything that is accessible to actors, beyond parameters. -type Runtime interface { - // Information related to the current message being executed. - // When an actor invokes a method on another actor as a sub-call, these values reflect - // the sub-call context, rather than the top-level context. - Message - - // Provides a handle for the actor's state object. - StateHandle - - // Provides IPLD storage for actor state - Store - - // Provides the system call interface. - Syscalls - - // The network protocol version number at the current epoch. - NetworkVersion() network.Version - - // The current chain epoch number. The genesis block has epoch zero. - CurrEpoch() abi.ChainEpoch - - // Satisfies the requirement that every exported actor method must invoke at least one caller validation - // method before returning, without making any assertions about the caller. - ValidateImmediateCallerAcceptAny() - - // Validates that the immediate caller's address exactly matches one of a set of expected addresses, - // aborting if it does not. - // The caller address is always normalized to an ID address, so expected addresses must be - // ID addresses to have any expectation of passing validation. - ValidateImmediateCallerIs(addrs ...addr.Address) - - // Validates that the immediate caller is an actor with code CID matching one of a set of - // expected CIDs, aborting if it does not. - ValidateImmediateCallerType(types ...cid.Cid) - - // The balance of the receiver. Always >= zero. - CurrentBalance() abi.TokenAmount - - // Resolves an address of any protocol to an ID address (via the Init actor's table). - // This allows resolution of externally-provided SECP, BLS, or actor addresses to the canonical form. - // If the argument is an ID address it is returned directly. - ResolveAddress(address addr.Address) (addr.Address, bool) - - // Look up the code ID at an actor address. - // The address will be resolved as if via ResolveAddress, if necessary, so need not be an ID-address. - GetActorCodeCID(addr addr.Address) (ret cid.Cid, ok bool) - - // GetRandomnessFromBeacon returns a (pseudo)random byte array drawing from a random beacon at a prior epoch. - // The beacon value is combined with the personalization tag, epoch number, and explicitly provided entropy. - // The personalization tag may be any int64 value. - // The epoch must be less than the current epoch. 
The epoch may be negative, in which case - // it addresses the beacon value from genesis block. - // The entropy may be any byte array, or nil. - GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness - - // GetRandomnessFromTickets samples randomness from the ticket chain. Randomess - // sampled through this method is unique per potential fork, and as a - // result, processes relying on this randomness are tied to whichever fork - // they choose. - // See GetRandomnessFromBeacon for notes about the personalization tag, epoch, and entropy. - GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness - - // Sends a message to another actor, returning the exit code and return value envelope. - // If the invoked method does not return successfully, its state changes (and that of any messages it sent in turn) - // will be rolled back. - Send(toAddr addr.Address, methodNum abi.MethodNum, params cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode - - // Halts execution upon an error from which the receiver cannot recover. The caller will receive the exitcode and - // an empty return value. State changes made within this call will be rolled back. - // This method does not return. - // The provided exit code must be >= exitcode.FirstActorExitCode. - // The message and args are for diagnostic purposes and do not persist on chain. They should be suitable for - // passing to fmt.Errorf(msg, args...). - Abortf(errExitCode exitcode.ExitCode, msg string, args ...interface{}) - - // Computes an address for a new actor. The returned address is intended to uniquely refer to - // the actor even in the event of a chain re-org (whereas an ID-address might refer to a - // different actor after messages are re-ordered). - // Always an ActorExec address. - NewActorAddress() addr.Address - - // Creates an actor with code `codeID` and address `address`, with empty state. - // May only be called by Init actor. - // Aborts if the provided address has previously been created. - CreateActor(codeId cid.Cid, address addr.Address) - - // Deletes the executing actor from the state tree, transferring any balance to beneficiary. - // Aborts if the beneficiary does not exist or is the calling actor. - // May only be called by the actor itself. - DeleteActor(beneficiary addr.Address) - - // Returns the total token supply in circulation at the beginning of the current epoch. - // The circulating supply is the sum of: - // - rewards emitted by the reward actor, - // - funds vested from lock-ups in the genesis state, - // less the sum of: - // - funds burnt, - // - pledge collateral locked in storage miner actors (recorded in the storage power actor) - // - deal collateral locked by the storage market actor - TotalFilCircSupply() abi.TokenAmount - - // Provides a Go context for use by HAMT, etc. - // The VM is intended to provide an idealised machine abstraction, with infinite storage etc, so this context - // should not be used by actor code directly. - Context() context.Context - - // Starts a new tracing span. The span must be End()ed explicitly by invoking or deferring EndSpan - StartSpan(name string) (EndSpan func()) - - // ChargeGas charges specified amount of `gas` for execution. - // `name` provides information about gas charging point - // `virtual` sets virtual amount of gas to charge, this amount is not counted - // toward execution cost. 
This functionality is used for observing global changes - // in total gas charged if amount of gas charged was to be changed. - ChargeGas(name string, gas int64, virtual int64) - - // Note events that may make debugging easier - Log(level rt.LogLevel, msg string, args ...interface{}) -} - -// Store defines the storage module exposed to actors. -type Store interface { - // Retrieves and deserializes an object from the store into `o`. Returns whether successful. - StoreGet(c cid.Cid, o cbor.Unmarshaler) bool - // Serializes and stores an object, returning its CID. - StorePut(x cbor.Marshaler) cid.Cid -} - -// Message contains information available to the actor about the executing message. -// These values are fixed for the duration of an invocation. -type Message interface { - // The address of the immediate calling actor. Always an ID-address. - // If an actor invokes its own method, Caller() == Receiver(). - Caller() addr.Address - - // The address of the actor receiving the message. Always an ID-address. - Receiver() addr.Address - - // The value attached to the message being processed, implicitly added to CurrentBalance() - // of Receiver() before method invocation. - // This value came from Caller(). - ValueReceived() abi.TokenAmount -} - -// Pure functions implemented as primitives by the runtime. -type Syscalls interface { - // Verifies that a signature is valid for an address and plaintext. - // If the address is a public-key type address, it is used directly. - // If it's an ID-address, the actor is looked up in state. It must be an account actor, and the - // public key is obtained from it's state. - VerifySignature(signature crypto.Signature, signer addr.Address, plaintext []byte) error - // Hashes input data using blake2b with 256 bit output. - HashBlake2b(data []byte) [32]byte - // Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. - ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) - // Verifies a sector seal proof. - VerifySeal(vi proof.SealVerifyInfo) error - - BatchVerifySeals(vis map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) - - // Verifies a proof of spacetime. - VerifyPoSt(vi proof.WindowPoStVerifyInfo) error - // Verifies that two block headers provide proof of a consensus fault: - // - both headers mined by the same actor - // - headers are different - // - first header is of the same or lower epoch as the second - // - the headers provide evidence of a fault (see the spec for the different fault types). - // The parameters are all serialized block headers. The third "extra" parameter is consulted only for - // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the - // blocks in an ancestor of h2. - // Returns nil and an error if the headers don't prove a fault. - VerifyConsensusFault(h1, h2, extra []byte) (*ConsensusFault, error) -} - -// StateHandle provides mutable, exclusive access to actor state. -type StateHandle interface { - // Create initializes the state object. - // This is only valid in a constructor function and when the state has not yet been initialized. - StateCreate(obj cbor.Marshaler) - - // Readonly loads a readonly copy of the state into the argument. - // - // Any modification to the state is illegal and will result in an abort. 
- StateReadonly(obj cbor.Unmarshaler) - - // Transaction loads a mutable version of the state into the `obj` argument and protects - // the execution from side effects (including message send). - // - // The second argument is a function which allows the caller to mutate the state. - // - // If the state is modified after this function returns, execution will abort. - // - // The gas cost of this method is that of a Store.Put of the mutated state object. - // - // Note: the Go signature is not ideal due to lack of type system power. - // - // # Usage - // ```go - // var state SomeState - // ret := rt.StateTransaction(&state, func() (interface{}) { - // // make some changes - // st.ImLoaded = True - // return st.Thing, nil - // }) - // // state.ImLoaded = False // BAD!! state is readonly outside the lambda, it will panic - // ``` - StateTransaction(obj cbor.Er, f func()) -} - -// Result of checking two headers for a consensus fault. -type ConsensusFault struct { - // Address of the miner at fault (always an ID address). - Target addr.Address - // Epoch of the fault, which is the higher epoch of the two blocks causing it. - Epoch abi.ChainEpoch - // Type of fault. - Type ConsensusFaultType -} - -type ConsensusFaultType int64 - -const ( - //ConsensusFaultNone ConsensusFaultType = 0 - ConsensusFaultDoubleForkMining ConsensusFaultType = 1 - ConsensusFaultParentGrinding ConsensusFaultType = 2 - ConsensusFaultTimeOffsetMining ConsensusFaultType = 3 -) - -// Wraps already-serialized bytes as CBOR-marshalable. -type CBORBytes []byte - -func (b CBORBytes) MarshalCBOR(w io.Writer) error { - _, err := w.Write(b) - return err -} - -func (b *CBORBytes) UnmarshalCBOR(r io.Reader) error { - var c bytes.Buffer - _, err := c.ReadFrom(r) - *b = c.Bytes() - return err -} - -type VMActor = rt.VMActor diff --git a/vendor/github.com/filecoin-project/specs-actors/v2/COPYRIGHT b/vendor/github.com/filecoin-project/specs-actors/v2/COPYRIGHT deleted file mode 100644 index 6aa4b36..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v2/COPYRIGHT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/specs-actors/v2/LICENSE-APACHE b/vendor/github.com/filecoin-project/specs-actors/v2/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v2/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/filecoin-project/specs-actors/v2/LICENSE-MIT b/vendor/github.com/filecoin-project/specs-actors/v2/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v2/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/filecoin-project/specs-actors/v2/actors/runtime/proof/verify.go b/vendor/github.com/filecoin-project/specs-actors/v2/actors/runtime/proof/verify.go deleted file mode 100644 index baff786..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v2/actors/runtime/proof/verify.go +++ /dev/null @@ -1,61 +0,0 @@ -package proof - -import ( - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" -) - -/// -/// Sealing -/// - -// Information needed to verify a seal proof. -//type SealVerifyInfo struct { -// SealProof abi.RegisteredSealProof -// abi.SectorID -// DealIDs []abi.DealID -// Randomness abi.SealRandomness -// InteractiveRandomness abi.InteractiveSealRandomness -// Proof []byte -// -// // Safe because we get those from the miner actor -// SealedCID cid.Cid `checked:"true"` // CommR -// UnsealedCID cid.Cid `checked:"true"` // CommD -//} -type SealVerifyInfo = proof0.SealVerifyInfo - -/// -/// PoSting -/// - -// Information about a proof necessary for PoSt verification. -//type SectorInfo struct { -// SealProof abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt -// SectorNumber abi.SectorNumber -// SealedCID cid.Cid // CommR -//} -type SectorInfo = proof0.SectorInfo - -//type PoStProof struct { -// PoStProof abi.RegisteredPoStProof -// ProofBytes []byte -//} -type PoStProof = proof0.PoStProof - -// Information needed to verify a Winning PoSt attached to a block header. -// Note: this is not used within the state machine, but by the consensus/election mechanisms. -//type WinningPoStVerifyInfo struct { -// Randomness abi.PoStRandomness -// Proofs []PoStProof -// ChallengedSectors []SectorInfo -// Prover abi.ActorID // used to derive 32-byte prover ID -//} -type WinningPoStVerifyInfo = proof0.WinningPoStVerifyInfo - -// Information needed to verify a Window PoSt submitted directly to a miner actor. -//type WindowPoStVerifyInfo struct { -// Randomness abi.PoStRandomness -// Proofs []PoStProof -// ChallengedSectors []SectorInfo -// Prover abi.ActorID // used to derive 32-byte prover ID -//} -type WindowPoStVerifyInfo = proof0.WindowPoStVerifyInfo diff --git a/vendor/github.com/filecoin-project/specs-actors/v5/COPYRIGHT b/vendor/github.com/filecoin-project/specs-actors/v5/COPYRIGHT deleted file mode 100644 index 6aa4b36..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v5/COPYRIGHT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright 2020. 
Protocol Labs, Inc. - -This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/specs-actors/v5/LICENSE-APACHE b/vendor/github.com/filecoin-project/specs-actors/v5/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v5/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/filecoin-project/specs-actors/v5/LICENSE-MIT b/vendor/github.com/filecoin-project/specs-actors/v5/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v5/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/filecoin-project/specs-actors/v5/actors/runtime/proof/verify.go b/vendor/github.com/filecoin-project/specs-actors/v5/actors/runtime/proof/verify.go deleted file mode 100644 index c4ef6c6..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v5/actors/runtime/proof/verify.go +++ /dev/null @@ -1,81 +0,0 @@ -package proof - -import ( - "github.com/filecoin-project/go-state-types/abi" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" - "github.com/ipfs/go-cid" -) - -/// -/// Sealing -/// - -// Information needed to verify a seal proof. 
-//type SealVerifyInfo struct { -// SealProof abi.RegisteredSealProof -// abi.SectorID -// DealIDs []abi.DealID -// Randomness abi.SealRandomness -// InteractiveRandomness abi.InteractiveSealRandomness -// Proof []byte -// -// // Safe because we get those from the miner actor -// SealedCID cid.Cid `checked:"true"` // CommR -// UnsealedCID cid.Cid `checked:"true"` // CommD -//} -type SealVerifyInfo = proof0.SealVerifyInfo - -type AggregateSealVerifyInfo struct { - Number abi.SectorNumber - Randomness abi.SealRandomness - InteractiveRandomness abi.InteractiveSealRandomness - - // Safe because we get those from the miner actor - SealedCID cid.Cid `checked:"true"` // CommR - UnsealedCID cid.Cid `checked:"true"` // CommD -} - -type AggregateSealVerifyProofAndInfos struct { - Miner abi.ActorID - SealProof abi.RegisteredSealProof - AggregateProof abi.RegisteredAggregationProof - Proof []byte - Infos []AggregateSealVerifyInfo -} - -/// -/// PoSting -/// - -// Information about a proof necessary for PoSt verification. -//type SectorInfo struct { -// SealProof abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt -// SectorNumber abi.SectorNumber -// SealedCID cid.Cid // CommR -//} -type SectorInfo = proof0.SectorInfo - -//type PoStProof struct { -// PoStProof abi.RegisteredPoStProof -// ProofBytes []byte -//} -type PoStProof = proof0.PoStProof - -// Information needed to verify a Winning PoSt attached to a block header. -// Note: this is not used within the state machine, but by the consensus/election mechanisms. -//type WinningPoStVerifyInfo struct { -// Randomness abi.PoStRandomness -// Proofs []PoStProof -// ChallengedSectors []SectorInfo -// Prover abi.ActorID // used to derive 32-byte prover ID -//} -type WinningPoStVerifyInfo = proof0.WinningPoStVerifyInfo - -// Information needed to verify a Window PoSt submitted directly to a miner actor. -//type WindowPoStVerifyInfo struct { -// Randomness abi.PoStRandomness -// Proofs []PoStProof -// ChallengedSectors []SectorInfo -// Prover abi.ActorID // used to derive 32-byte prover ID -//} -type WindowPoStVerifyInfo = proof0.WindowPoStVerifyInfo diff --git a/vendor/github.com/filecoin-project/specs-actors/v7/COPYRIGHT b/vendor/github.com/filecoin-project/specs-actors/v7/COPYRIGHT deleted file mode 100644 index 6aa4b36..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v7/COPYRIGHT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/specs-actors/v7/LICENSE-APACHE b/vendor/github.com/filecoin-project/specs-actors/v7/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v7/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
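The v5 verify.go above introduces the aggregate seal types consumed by Syscalls.VerifyAggregateSeals. A sketch of how a caller might assemble one, assuming the (now unvendored) v5 module path and go-state-types; the specific proof constants and the cid.Undef placeholders are illustrative, not taken from this diff.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
)

func main() {
	// Placeholder commitments; real values come from the miner actor's state.
	sealedCID := cid.Undef   // CommR (illustrative only)
	unsealedCID := cid.Undef // CommD (illustrative only)

	infos := []proof5.AggregateSealVerifyInfo{
		{
			Number:                abi.SectorNumber(1),
			Randomness:            abi.SealRandomness([]byte("ticket")),
			InteractiveRandomness: abi.InteractiveSealRandomness([]byte("seed")),
			SealedCID:             sealedCID,
			UnsealedCID:           unsealedCID,
		},
	}

	// An implementation of the runtime Syscalls would pass this value to
	// VerifyAggregateSeals; here we only construct and describe it.
	aggregate := proof5.AggregateSealVerifyProofAndInfos{
		Miner:          abi.ActorID(1000),
		SealProof:      abi.RegisteredSealProof_StackedDrg32GiBV1_1,
		AggregateProof: abi.RegisteredAggregationProof_SnarkPackV1,
		Proof:          []byte{ /* aggregated SNARK bytes */ },
		Infos:          infos,
	}

	fmt.Printf("aggregate covers %d sector(s) for miner %d\n", len(aggregate.Infos), aggregate.Miner)
}
```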
diff --git a/vendor/github.com/filecoin-project/specs-actors/v7/LICENSE-MIT b/vendor/github.com/filecoin-project/specs-actors/v7/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v7/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/proof/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/proof/cbor_gen.go deleted file mode 100644 index d63a998..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/proof/cbor_gen.go +++ /dev/null @@ -1,159 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package proof - -import ( - "fmt" - "io" - - abi "github.com/filecoin-project/go-state-types/abi" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufExtendedSectorInfo = []byte{132} - -func (t *ExtendedSectorInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufExtendedSectorInfo); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SealProof (abi.RegisteredSealProof) (int64) - if t.SealProof >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { - return err - } - } - - // t.SectorNumber (abi.SectorNumber) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { - return err - } - - // t.SectorKey (cid.Cid) (struct) - - if t.SectorKey == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.SectorKey); err != nil { - return xerrors.Errorf("failed to write cid field t.SectorKey: %w", err) - } - } - - // t.SealedCID (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { - return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) - } - - return nil -} - -func (t *ExtendedSectorInfo) UnmarshalCBOR(r io.Reader) error { - *t = ExtendedSectorInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SealProof (abi.RegisteredSealProof) (int64) - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.SealProof = abi.RegisteredSealProof(extraI) - } - // t.SectorNumber (abi.SectorNumber) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = abi.SectorNumber(extra) - - } - // t.SectorKey (cid.Cid) (struct) - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SectorKey: %w", err) - } - - t.SectorKey = &c - } - - } - // t.SealedCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) - } - - t.SealedCID = c - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/proof/verify.go b/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/proof/verify.go deleted file mode 100644 
index e80a722..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/proof/verify.go +++ /dev/null @@ -1,89 +0,0 @@ -package proof - -import ( - "github.com/filecoin-project/go-state-types/abi" - proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" - "github.com/ipfs/go-cid" -) - -/// -/// Sealing -/// - -// Information needed to verify a seal proof. -//type SealVerifyInfo struct { -// SealProof abi.RegisteredSealProof -// abi.SectorID -// DealIDs []abi.DealID -// Randomness abi.SealRandomness -// InteractiveRandomness abi.InteractiveSealRandomness -// ReplicaProof []byte -// -// // Safe because we get those from the miner actor -// SealedCID cid.Cid `checked:"true"` // CommR -// UnsealedCID cid.Cid `checked:"true"` // CommD -//} -type SealVerifyInfo = proof0.SealVerifyInfo - -type AggregateSealVerifyInfo = proof5.AggregateSealVerifyInfo - -type AggregateSealVerifyProofAndInfos = proof5.AggregateSealVerifyProofAndInfos - -/// -/// Replica -/// - -// Information needed to verify a replica update - -type ReplicaUpdateInfo struct { - UpdateProofType abi.RegisteredUpdateProof - OldSealedSectorCID cid.Cid - NewSealedSectorCID cid.Cid - NewUnsealedSectorCID cid.Cid - Proof []byte -} - -/// -/// PoSting -/// - -// Information about a proof necessary for PoSt verification. -// type SectorInfo struct { -// SealProof abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt -// SectorNumber abi.SectorNumber -// SealedCID cid.Cid // CommR -// } -type SectorInfo = proof0.SectorInfo - -type ExtendedSectorInfo struct { - SealProof abi.RegisteredSealProof // RegisteredProof used when sealing - needs to be mapped to PoSt registered proof when used to verify a PoSt - SectorNumber abi.SectorNumber - SectorKey *cid.Cid - SealedCID cid.Cid // CommR -} - -//type PoStProof struct { -// PoStProof abi.RegisteredPoStProof -// ProofBytes []byte -//} -type PoStProof = proof0.PoStProof - -// Information needed to verify a Winning PoSt attached to a block header. -// Note: this is not used within the state machine, but by the consensus/election mechanisms. -//type WinningPoStVerifyInfo struct { -// Randomness abi.PoStRandomness -// Proofs []PoStProof -// ChallengedSectors []SectorInfo -// Prover abi.ActorID // used to derive 32-byte prover ID -//} -type WinningPoStVerifyInfo = proof0.WinningPoStVerifyInfo - -// Information needed to verify a Window PoSt submitted directly to a miner actor. 
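One detail of the generated marshaller above that is easy to miss: lengthBufExtendedSectorInfo = []byte{132} is simply the CBOR header for a fixed-length array of four elements, matching the four fields of ExtendedSectorInfo. A standalone check of that arithmetic:

```go
package main

import "fmt"

func main() {
	// A CBOR header byte packs the major type into the top three bits and the
	// "additional information" (here, the array length) into the low five bits.
	const majArray = 4  // CBOR major type 4: array
	const numFields = 4 // ExtendedSectorInfo has four fields

	header := byte(majArray<<5 | numFields)

	fmt.Println(header)         // 132
	fmt.Printf("%#x\n", header) // 0x84
}
```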
-//type WindowPoStVerifyInfo struct { -// Randomness abi.PoStRandomness -// Proofs []PoStProof -// ChallengedSectors []SectorInfo -// Prover abi.ActorID // used to derive 32-byte prover ID -//} -type WindowPoStVerifyInfo = proof0.WindowPoStVerifyInfo diff --git a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/runtime.go b/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/runtime.go deleted file mode 100644 index 8dbc9f5..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/runtime.go +++ /dev/null @@ -1,239 +0,0 @@ -package runtime - -import ( - "context" - - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/cbor" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/go-state-types/rt" - cid "github.com/ipfs/go-cid" - - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" - "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" -) - -// Interfaces for the runtime. -// These interfaces are not aliased onto prior versions even if they match exactly. -// Go's implicit interface satisfaction should mean that a single concrete type can satisfy -// many versions at the same time. - -// Runtime is the interface to the execution environment for actor methods.. -// This is everything that is accessible to actors, beyond parameters. -type Runtime interface { - // Information related to the current message being executed. - // When an actor invokes a method on another actor as a sub-call, these values reflect - // the sub-call context, rather than the top-level context. - Message - - // Provides a handle for the actor's state object. - StateHandle - - // Provides IPLD storage for actor state - Store - - // Provides the system call interface. - Syscalls - - // The network protocol version number at the current epoch. - NetworkVersion() network.Version - - // The current chain epoch number. The genesis block has epoch zero. - CurrEpoch() abi.ChainEpoch - - // Satisfies the requirement that every exported actor method must invoke at least one caller validation - // method before returning, without making any assertions about the caller. - ValidateImmediateCallerAcceptAny() - - // Validates that the immediate caller's address exactly matches one of a set of expected addresses, - // aborting if it does not. - // The caller address is always normalized to an ID address, so expected addresses must be - // ID addresses to have any expectation of passing validation. - ValidateImmediateCallerIs(addrs ...addr.Address) - - // Validates that the immediate caller is an actor with code CID matching one of a set of - // expected CIDs, aborting if it does not. - ValidateImmediateCallerType(types ...cid.Cid) - - // The balance of the receiver. Always >= zero. - CurrentBalance() abi.TokenAmount - - // Resolves an address of any protocol to an ID address (via the Init actor's table). - // This allows resolution of externally-provided SECP, BLS, or actor addresses to the canonical form. - // If the argument is an ID address it is returned directly. - ResolveAddress(address addr.Address) (addr.Address, bool) - - // Look up the code ID at an actor address. - // The address will be resolved as if via ResolveAddress, if necessary, so need not be an ID-address. 
- GetActorCodeCID(addr addr.Address) (ret cid.Cid, ok bool) - - // GetRandomnessFromBeacon returns a (pseudo)random byte array drawing from a random beacon at a prior epoch. - // The beacon value is combined with the personalization tag, epoch number, and explicitly provided entropy. - // The personalization tag may be any int64 value. - // The epoch must be less than the current epoch. The epoch may be negative, in which case - // it addresses the beacon value from genesis block. - // The entropy may be any byte array, or nil. - GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness - - // GetRandomnessFromTickets samples randomness from the ticket chain. Randomess - // sampled through this method is unique per potential fork, and as a - // result, processes relying on this randomness are tied to whichever fork - // they choose. - // See GetRandomnessFromBeacon for notes about the personalization tag, epoch, and entropy. - GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness - - // Sends a message to another actor, returning the exit code and return value envelope. - // If the invoked method does not return successfully, its state changes (and that of any messages it sent in turn) - // will be rolled back. - Send(toAddr addr.Address, methodNum abi.MethodNum, params cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode - - // Halts execution upon an error from which the receiver cannot recover. The caller will receive the exitcode and - // an empty return value. State changes made within this call will be rolled back. - // This method does not return. - // The provided exit code must be >= exitcode.FirstActorExitCode. - // The message and args are for diagnostic purposes and do not persist on chain. They should be suitable for - // passing to fmt.Errorf(msg, args...). - Abortf(errExitCode exitcode.ExitCode, msg string, args ...interface{}) - - // Computes an address for a new actor. The returned address is intended to uniquely refer to - // the actor even in the event of a chain re-org (whereas an ID-address might refer to a - // different actor after messages are re-ordered). - // Always an ActorExec address. - NewActorAddress() addr.Address - - // Creates an actor with code `codeID` and address `address`, with empty state. - // May only be called by Init actor. - // Aborts if the provided address has previously been created. - CreateActor(codeId cid.Cid, address addr.Address) - - // Deletes the executing actor from the state tree, transferring any balance to beneficiary. - // Aborts if the beneficiary does not exist or is the calling actor. - // May only be called by the actor itself. - DeleteActor(beneficiary addr.Address) - - // Returns the total token supply in circulation at the beginning of the current epoch. - // The circulating supply is the sum of: - // - rewards emitted by the reward actor, - // - funds vested from lock-ups in the genesis state, - // less the sum of: - // - funds burnt, - // - pledge collateral locked in storage miner actors (recorded in the storage power actor) - // - deal collateral locked by the storage market actor - TotalFilCircSupply() abi.TokenAmount - - // Provides a Go context for use by HAMT, etc. - // The VM is intended to provide an idealised machine abstraction, with infinite storage etc, so this context - // should not be used by actor code directly. 
- Context() context.Context - - // Starts a new tracing span. The span must be End()ed explicitly by invoking or deferring EndSpan - StartSpan(name string) (EndSpan func()) - - // ChargeGas charges specified amount of `gas` for execution. - // `name` provides information about gas charging point - // `virtual` sets virtual amount of gas to charge, this amount is not counted - // toward execution cost. This functionality is used for observing global changes - // in total gas charged if amount of gas charged was to be changed. - ChargeGas(name string, gas int64, virtual int64) - - // Note events that may make debugging easier - Log(level rt.LogLevel, msg string, args ...interface{}) - - // BaseFee returns the basefee value in attoFIL per unit gas for the currently exectuting tipset. - BaseFee() abi.TokenAmount -} - -// Store defines the storage module exposed to actors. -type Store interface { - // Retrieves and deserializes an object from the store into `o`. Returns whether successful. - StoreGet(c cid.Cid, o cbor.Unmarshaler) bool - // Serializes and stores an object, returning its CID. - StorePut(x cbor.Marshaler) cid.Cid -} - -// Message contains information available to the actor about the executing message. -// These values are fixed for the duration of an invocation. -type Message interface { - // The address of the immediate calling actor. Always an ID-address. - // If an actor invokes its own method, Caller() == Receiver(). - Caller() addr.Address - - // The address of the actor receiving the message. Always an ID-address. - Receiver() addr.Address - - // The value attached to the message being processed, implicitly added to CurrentBalance() - // of Receiver() before method invocation. - // This value came from Caller(). - ValueReceived() abi.TokenAmount -} - -// Pure functions implemented as primitives by the runtime. -type Syscalls interface { - // Verifies that a signature is valid for an address and plaintext. - // If the address is a public-key type address, it is used directly. - // If it's an ID-address, the actor is looked up in state. It must be an account actor, and the - // public key is obtained from its state. - VerifySignature(signature crypto.Signature, signer addr.Address, plaintext []byte) error - // Hashes input data using blake2b with 256 bit output. - HashBlake2b(data []byte) [32]byte - // Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. - ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) - // Verifies a sector seal proof. - // Deprecated and un-used. - VerifySeal(vi proof.SealVerifyInfo) error - - BatchVerifySeals(vis map[addr.Address][]proof.SealVerifyInfo) (map[addr.Address][]bool, error) - VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) error - - VerifyReplicaUpdate(replicaInfo proof.ReplicaUpdateInfo) error - - // Verifies a proof of spacetime. - VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error - // Verifies that two block headers provide proof of a consensus fault: - // - both headers mined by the same actor - // - headers are different - // - first header is of the same or lower epoch as the second - // - the headers provide evidence of a fault (see the spec for the different fault types). - // The parameters are all serialized block headers. The third "extra" parameter is consulted only for - // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the - // blocks in an ancestor of h2. 
- // Returns nil and an error if the headers don't prove a fault. - VerifyConsensusFault(h1, h2, extra []byte) (*ConsensusFault, error) -} - -// StateHandle provides mutable, exclusive access to actor state. -type StateHandle interface { - // Create initializes the state object. - // This is only valid in a constructor function and when the state has not yet been initialized. - StateCreate(obj cbor.Marshaler) - - // Readonly loads a readonly copy of the state into the argument. - // - // Any modification to the state is illegal and will result in an abort. - StateReadonly(obj cbor.Unmarshaler) - - // Transaction loads a mutable version of the state into the `obj` argument and protects - // the execution from side effects (including message send). - // - // The second argument is a function which allows the caller to mutate the state. - // - // If the state is modified after this function returns, execution will abort. - // - // The gas cost of this method is that of a Store.Put of the mutated state object. - // - // Note: the Go signature is not ideal due to lack of type system power. - // - // # Usage - // ```go - // var state SomeState - // rt.StateTransaction(&state, func() { - // // make some changes - // state.ImLoaded = true - // }) - // // state.ImLoaded = false // BAD!! state is readonly outside the lambda, it will panic - // ``` - StateTransaction(obj cbor.Er, f func()) -} diff --git a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/types.go b/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/types.go deleted file mode 100644 index 37682d6..0000000 --- a/vendor/github.com/filecoin-project/specs-actors/v7/actors/runtime/types.go +++ /dev/null @@ -1,30 +0,0 @@ -package runtime - -import ( - "github.com/filecoin-project/go-state-types/rt" - runtime0 "github.com/filecoin-project/specs-actors/actors/runtime" -) - -// Concrete types associated with the runtime interface. - -// Result of checking two headers for a consensus fault. -type ConsensusFault = runtime0.ConsensusFault - -//type ConsensusFault struct { -// // Address of the miner at fault (always an ID address). -// Target addr.Address -// // Epoch of the fault, which is the higher epoch of the two blocks causing it. -// Epoch abi.ChainEpoch -// // Type of fault. -// Type ConsensusFaultType -//} - -type ConsensusFaultType = runtime0.ConsensusFaultType - -const ( - ConsensusFaultDoubleForkMining = runtime0.ConsensusFaultDoubleForkMining - ConsensusFaultParentGrinding = runtime0.ConsensusFaultParentGrinding - ConsensusFaultTimeOffsetMining = runtime0.ConsensusFaultTimeOffsetMining -) - -type VMActor = rt.VMActor diff --git a/vendor/github.com/ipfs/go-block-format/LICENSE b/vendor/github.com/ipfs/go-block-format/LICENSE deleted file mode 100644 index 8001ebe..0000000 --- a/vendor/github.com/ipfs/go-block-format/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2017 Juan Batiz-Benet - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
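The note at the top of the deleted v7 runtime.go ("These interfaces are not aliased onto prior versions even if they match exactly...") leans on Go's implicit interface satisfaction: one concrete VM runtime can satisfy the Runtime interface of several actor versions at once. A toy, self-contained illustration, with made-up names standing in for the per-version interfaces and the VM's concrete runtime:

```go
package main

import "fmt"

// Two independent interface declarations with identical method sets stand in
// for runtime.Runtime as declared by two different specs-actors versions.
type RuntimeV6 interface{ CurrEpoch() int64 }
type RuntimeV7 interface{ CurrEpoch() int64 }

// vmRuntime is a stand-in for the VM's single concrete runtime implementation.
type vmRuntime struct{ epoch int64 }

func (r *vmRuntime) CurrEpoch() int64 { return r.epoch }

func main() {
	rt := &vmRuntime{epoch: 123}

	// The same value satisfies both interfaces implicitly, with no aliasing.
	var v6 RuntimeV6 = rt
	var v7 RuntimeV7 = rt
	fmt.Println(v6.CurrEpoch(), v7.CurrEpoch())
}
```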
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-block-format/README.md b/vendor/github.com/ipfs/go-block-format/README.md deleted file mode 100644 index 210ef07..0000000 --- a/vendor/github.com/ipfs/go-block-format/README.md +++ /dev/null @@ -1,38 +0,0 @@ -go-block-format -================== - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) -[![Coverage Status](https://codecov.io/gh/ipfs/go-block-format/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-block-format/branch/master) -[![Travis CI](https://travis-ci.org/ipfs/go-block-format.svg?branch=master)](https://travis-ci.org/ipfs/go-block-format) - -> go-block-format is a set of interfaces that a type needs to implement in order to be a CID addressable block of data. - -## Lead Maintainer - -[Eric Myhre](https://github.com/warpfork) - -## Table of Contents - -- [Install](#install) -- [Usage](#usage) -- [API](#api) -- [Contribute](#contribute) -- [License](#license) - -## Install - -```sh -make install -``` - -## Contribute - -PRs are welcome! - -Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. - -## License - -MIT © Juan Batiz-Benet diff --git a/vendor/github.com/ipfs/go-block-format/blocks.go b/vendor/github.com/ipfs/go-block-format/blocks.go deleted file mode 100644 index e49431c..0000000 --- a/vendor/github.com/ipfs/go-block-format/blocks.go +++ /dev/null @@ -1,86 +0,0 @@ -// Package blocks contains the lowest level of IPLD data structures. -// A block is raw data accompanied by a CID. The CID contains the multihash -// corresponding to the block. -package blocks - -import ( - "errors" - "fmt" - - cid "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" -) - -// DefaultIpfsHash is the current default hash function used by IPFS. -const DefaultIpfsHash = mh.SHA2_256 - -// Hash is the global IPFS hash function. uses multihash SHA2_256, 256 bits -func Hash(data []byte) mh.Multihash { - h, err := mh.Sum(data, DefaultIpfsHash, -1) - if err != nil { - // this error can be safely ignored (panic) because multihash only fails - // from the selection of hash function. If the fn + length are valid, it - // won't error. - panic("multihash failed to hash using SHA2_256.") - } - return h -} - -// ErrWrongHash is returned when the Cid of a block is not the expected -// according to the contents. It is currently used only when debugging. -var ErrWrongHash = errors.New("data did not match given hash") - -// Block provides abstraction for blocks implementations. -type Block interface { - RawData() []byte - Cid() cid.Cid - String() string - Loggable() map[string]interface{} -} - -// A BasicBlock is a singular block of data in ipfs. It implements the Block -// interface. 
-type BasicBlock struct { - cid cid.Cid - data []byte -} - -// NewBlock creates a Block object from opaque data. It will hash the data. -func NewBlock(data []byte) *BasicBlock { - // TODO: fix assumptions - return &BasicBlock{data: data, cid: cid.NewCidV0(Hash(data))} -} - -// NewBlockWithCid creates a new block when the hash of the data -// is already known, this is used to save time in situations where -// we are able to be confident that the data is correct. -func NewBlockWithCid(data []byte, c cid.Cid) (*BasicBlock, error) { - return &BasicBlock{data: data, cid: c}, nil -} - -// Multihash returns the hash contained in the block CID. -func (b *BasicBlock) Multihash() mh.Multihash { - return b.cid.Hash() -} - -// RawData returns the block raw contents as a byte slice. -func (b *BasicBlock) RawData() []byte { - return b.data -} - -// Cid returns the content identifier of the block. -func (b *BasicBlock) Cid() cid.Cid { - return b.cid -} - -// String provides a human-readable representation of the block CID. -func (b *BasicBlock) String() string { - return fmt.Sprintf("[Block %s]", b.Cid()) -} - -// Loggable returns a go-log loggable item. -func (b *BasicBlock) Loggable() map[string]interface{} { - return map[string]interface{}{ - "block": b.Cid().String(), - } -} diff --git a/vendor/github.com/ipfs/go-block-format/codecov.yml b/vendor/github.com/ipfs/go-block-format/codecov.yml deleted file mode 100644 index 5f88a9e..0000000 --- a/vendor/github.com/ipfs/go-block-format/codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -coverage: - range: "50...100" -comment: off diff --git a/vendor/github.com/ipfs/go-block-format/version.json b/vendor/github.com/ipfs/go-block-format/version.json deleted file mode 100644 index 4f0adde..0000000 --- a/vendor/github.com/ipfs/go-block-format/version.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "version": "v0.0.3" -} diff --git a/vendor/github.com/ipfs/go-cid/.gitignore b/vendor/github.com/ipfs/go-cid/.gitignore deleted file mode 100644 index aaea8ed..0000000 --- a/vendor/github.com/ipfs/go-cid/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cid-fuzz.zip diff --git a/vendor/github.com/ipfs/go-cid/LICENSE b/vendor/github.com/ipfs/go-cid/LICENSE deleted file mode 100644 index 0e32302..0000000 --- a/vendor/github.com/ipfs/go-cid/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
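The deleted go-block-format README above has no usage snippet, so here is a small sketch of the BasicBlock API from blocks.go, assuming the module is still fetched from its upstream location: NewBlock hashes the payload with the default SHA2-256 multihash and wraps it in a CIDv0.

```go
package main

import (
	"fmt"

	blocks "github.com/ipfs/go-block-format"
)

func main() {
	// NewBlock hashes the payload with SHA2-256 and wraps it in a CIDv0.
	b := blocks.NewBlock([]byte("hello filecoin"))

	fmt.Println("cid:          ", b.Cid())
	fmt.Println("multihash:    ", b.Multihash().B58String())
	fmt.Println("payload bytes:", len(b.RawData()))
}
```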
diff --git a/vendor/github.com/ipfs/go-cid/Makefile b/vendor/github.com/ipfs/go-cid/Makefile deleted file mode 100644 index 554bed3..0000000 --- a/vendor/github.com/ipfs/go-cid/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -all: deps - -deps: - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/ipfs/go-cid/README.md b/vendor/github.com/ipfs/go-cid/README.md deleted file mode 100644 index 89da041..0000000 --- a/vendor/github.com/ipfs/go-cid/README.md +++ /dev/null @@ -1,115 +0,0 @@ -go-cid -================== - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) -[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) -[![GoDoc](https://godoc.org/github.com/ipfs/go-cid?status.svg)](https://godoc.org/github.com/ipfs/go-cid) -[![Coverage Status](https://coveralls.io/repos/github/ipfs/go-cid/badge.svg?branch=master)](https://coveralls.io/github/ipfs/go-cid?branch=master) -[![Travis CI](https://travis-ci.org/ipfs/go-cid.svg?branch=master)](https://travis-ci.org/ipfs/go-cid) - -> A package to handle content IDs in Go. - -This is an implementation in Go of the [CID spec](https://github.com/ipld/cid). -It is used in `go-ipfs` and related packages to refer to a typed hunk of data. - -## Lead Maintainer - -[Eric Myhre](https://github.com/warpfork) - -## Table of Contents - -- [Install](#install) -- [Usage](#usage) -- [API](#api) -- [Contribute](#contribute) -- [License](#license) - -## Install - -`go-cid` is a standard Go module which can be installed with: - -```sh -go get github.com/ipfs/go-cid -``` - -## Usage - -### Running tests - -Run tests with `go test` from the directory root - -```sh -go test -``` - -### Examples - -#### Parsing string input from users - -```go -// Create a cid from a marshaled string -c, err := cid.Decode("bafzbeigai3eoy2ccc7ybwjfz5r3rdxqrinwi4rwytly24tdbh6yk7zslrm") -if err != nil {...} - -fmt.Println("Got CID: ", c) -``` - -#### Creating a CID from scratch - -```go - -import ( - cid "github.com/ipfs/go-cid" - mc "github.com/multiformats/go-multicodec" - mh "github.com/multiformats/go-multihash" -) - -// Create a cid manually by specifying the 'prefix' parameters -pref := cid.Prefix{ - Version: 1, - Codec: mc.Raw, - MhType: mh.SHA2_256, - MhLength: -1, // default length -} - -// And then feed it some data -c, err := pref.Sum([]byte("Hello World!")) -if err != nil {...} - -fmt.Println("Created CID: ", c) -``` - -#### Check if two CIDs match - -```go -// To test if two cid's are equivalent, be sure to use the 'Equals' method: -if c1.Equals(c2) { - fmt.Println("These two refer to the same exact data!") -} -``` - -#### Check if some data matches a given CID - -```go -// To check if some data matches a given cid, -// Get your CIDs prefix, and use that to sum the data in question: -other, err := c.Prefix().Sum(mydata) -if err != nil {...} - -if !c.Equals(other) { - fmt.Println("This data is different.") -} - -``` - -## Contribute - -PRs are welcome! - -Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
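Complementing the README examples above, a short sketch of two other parts of the deleted cid.go API: inspecting a CID's Prefix (version, codec, and multihash parameters) and the IPLD-style JSON link form produced by MarshalJSON. The CID string is the one already used in the README.

```go
package main

import (
	"encoding/json"
	"fmt"

	cid "github.com/ipfs/go-cid"
)

func main() {
	c, err := cid.Decode("bafzbeigai3eoy2ccc7ybwjfz5r3rdxqrinwi4rwytly24tdbh6yk7zslrm")
	if err != nil {
		panic(err)
	}

	// Prefix captures version, codec, and multihash parameters without the digest.
	pref := c.Prefix()
	fmt.Printf("version=%d codec=%#x mh-type=%#x mh-len=%d\n",
		pref.Version, pref.Codec, pref.MhType, pref.MhLength)

	// MarshalJSON renders the IPLD link form: {"/":"<cid-string>"}.
	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```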
- -## License - -MIT © Jeromy Johnson diff --git a/vendor/github.com/ipfs/go-cid/builder.go b/vendor/github.com/ipfs/go-cid/builder.go deleted file mode 100644 index 3d2fc77..0000000 --- a/vendor/github.com/ipfs/go-cid/builder.go +++ /dev/null @@ -1,74 +0,0 @@ -package cid - -import ( - mh "github.com/multiformats/go-multihash" -) - -type Builder interface { - Sum(data []byte) (Cid, error) - GetCodec() uint64 - WithCodec(uint64) Builder -} - -type V0Builder struct{} - -type V1Builder struct { - Codec uint64 - MhType uint64 - MhLength int // MhLength <= 0 means the default length -} - -func (p Prefix) GetCodec() uint64 { - return p.Codec -} - -func (p Prefix) WithCodec(c uint64) Builder { - if c == p.Codec { - return p - } - p.Codec = c - if c != DagProtobuf { - p.Version = 1 - } - return p -} - -func (p V0Builder) Sum(data []byte) (Cid, error) { - hash, err := mh.Sum(data, mh.SHA2_256, -1) - if err != nil { - return Undef, err - } - return Cid{string(hash)}, nil -} - -func (p V0Builder) GetCodec() uint64 { - return DagProtobuf -} - -func (p V0Builder) WithCodec(c uint64) Builder { - if c == DagProtobuf { - return p - } - return V1Builder{Codec: c, MhType: mh.SHA2_256} -} - -func (p V1Builder) Sum(data []byte) (Cid, error) { - mhLen := p.MhLength - if mhLen <= 0 { - mhLen = -1 - } - hash, err := mh.Sum(data, p.MhType, mhLen) - if err != nil { - return Undef, err - } - return NewCidV1(p.Codec, hash), nil -} - -func (p V1Builder) GetCodec() uint64 { - return p.Codec -} - -func (p V1Builder) WithCodec(c uint64) Builder { - p.Codec = c - return p -} diff --git a/vendor/github.com/ipfs/go-cid/cid.go b/vendor/github.com/ipfs/go-cid/cid.go deleted file mode 100644 index bc5704a..0000000 --- a/vendor/github.com/ipfs/go-cid/cid.go +++ /dev/null @@ -1,771 +0,0 @@ -// Package cid implements the Content-IDentifiers specification -// (https://github.com/ipld/cid) in Go. CIDs are -// self-describing content-addressed identifiers useful for -// distributed information systems. CIDs are used in the IPFS -// (https://ipfs.io) project ecosystem. -// -// CIDs have two major versions. A CIDv0 corresponds to a multihash of type -// DagProtobuf, is deprecated and exists for compatibility reasons. Usually, -// CIDv1 should be used. -// -// A CIDv1 has four parts: -// -// <cidv1> ::= <multibase-prefix><version><multicodec-packed-content-type><multihash> -// -// As shown above, the CID implementation relies heavily on Multiformats, -// particularly Multibase -// (https://github.com/multiformats/go-multibase), Multicodec -// (https://github.com/multiformats/multicodec) and Multihash -// implementations (https://github.com/multiformats/go-multihash).
-package cid - -import ( - "bytes" - "encoding" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "strings" - - mbase "github.com/multiformats/go-multibase" - mh "github.com/multiformats/go-multihash" - varint "github.com/multiformats/go-varint" -) - -// UnsupportedVersionString just holds an error message -const UnsupportedVersionString = "" - -var ( - // ErrCidTooShort means that the cid passed to decode was not long - // enough to be a valid Cid - ErrCidTooShort = errors.New("cid too short") - - // ErrInvalidEncoding means that selected encoding is not supported - // by this Cid version - ErrInvalidEncoding = errors.New("invalid base encoding") -) - -// Consts below are DEPRECATED and left only for legacy reasons: -// -// Modern code should use consts from go-multicodec instead: -// -const ( - // common ones - Raw = 0x55 - DagProtobuf = 0x70 // https://ipld.io/docs/codecs/known/dag-pb/ - DagCBOR = 0x71 // https://ipld.io/docs/codecs/known/dag-cbor/ - DagJSON = 0x0129 // https://ipld.io/docs/codecs/known/dag-json/ - Libp2pKey = 0x72 // https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md#peer-ids - - // other - GitRaw = 0x78 - DagJOSE = 0x85 // https://ipld.io/specs/codecs/dag-jose/spec/ - EthBlock = 0x90 - EthBlockList = 0x91 - EthTxTrie = 0x92 - EthTx = 0x93 - EthTxReceiptTrie = 0x94 - EthTxReceipt = 0x95 - EthStateTrie = 0x96 - EthAccountSnapshot = 0x97 - EthStorageTrie = 0x98 - BitcoinBlock = 0xb0 - BitcoinTx = 0xb1 - ZcashBlock = 0xc0 - ZcashTx = 0xc1 - DecredBlock = 0xe0 - DecredTx = 0xe1 - DashBlock = 0xf0 - DashTx = 0xf1 - FilCommitmentUnsealed = 0xf101 - FilCommitmentSealed = 0xf102 -) - -// tryNewCidV0 tries to convert a multihash into a CIDv0 CID and returns an -// error on failure. -func tryNewCidV0(mhash mh.Multihash) (Cid, error) { - // Need to make sure hash is valid for CidV0 otherwise we will - // incorrectly detect it as CidV1 in the Version() method - dec, err := mh.Decode(mhash) - if err != nil { - return Undef, err - } - if dec.Code != mh.SHA2_256 || dec.Length != 32 { - return Undef, fmt.Errorf("invalid hash for cidv0 %d-%d", dec.Code, dec.Length) - } - return Cid{string(mhash)}, nil -} - -// NewCidV0 returns a Cid-wrapped multihash. -// They exist to allow IPFS to work with Cids while keeping -// compatibility with the plain-multihash format used used in IPFS. -// NewCidV1 should be used preferentially. -// -// Panics if the multihash isn't sha2-256. -func NewCidV0(mhash mh.Multihash) Cid { - c, err := tryNewCidV0(mhash) - if err != nil { - panic(err) - } - return c -} - -// NewCidV1 returns a new Cid using the given multicodec-packed -// content type. -// -// Panics if the multihash is invalid. -func NewCidV1(codecType uint64, mhash mh.Multihash) Cid { - hashlen := len(mhash) - - // Two 8 bytes (max) numbers plus hash. - // We use strings.Builder to only allocate once. - var b strings.Builder - b.Grow(1 + varint.UvarintSize(codecType) + hashlen) - - b.WriteByte(1) - - var buf [binary.MaxVarintLen64]byte - n := varint.PutUvarint(buf[:], codecType) - b.Write(buf[:n]) - - cn, _ := b.Write(mhash) - if cn != hashlen { - panic("copy hash length is inconsistent") - } - - return Cid{b.String()} -} - -var ( - _ encoding.BinaryMarshaler = Cid{} - _ encoding.BinaryUnmarshaler = (*Cid)(nil) - _ encoding.TextMarshaler = Cid{} - _ encoding.TextUnmarshaler = (*Cid)(nil) -) - -// Cid represents a self-describing content addressed -// identifier. It is formed by a Version, a Codec (which indicates -// a multicodec-packed content type) and a Multihash. 
-type Cid struct{ str string } - -// Undef can be used to represent a nil or undefined Cid, using Cid{} -// directly is also acceptable. -var Undef = Cid{} - -// Defined returns true if a Cid is defined -// Calling any other methods on an undefined Cid will result in -// undefined behavior. -func (c Cid) Defined() bool { - return c.str != "" -} - -// Parse is a short-hand function to perform Decode, Cast etc... on -// a generic interface{} type. -func Parse(v interface{}) (Cid, error) { - switch v2 := v.(type) { - case string: - if strings.Contains(v2, "/ipfs/") { - return Decode(strings.Split(v2, "/ipfs/")[1]) - } - return Decode(v2) - case []byte: - return Cast(v2) - case mh.Multihash: - return tryNewCidV0(v2) - case Cid: - return v2, nil - default: - return Undef, fmt.Errorf("can't parse %+v as Cid", v2) - } -} - -// Decode parses a Cid-encoded string and returns a Cid object. -// For CidV1, a Cid-encoded string is primarily a multibase string: -// -// -// -// The base-encoded string represents a: -// -// -// -// Decode will also detect and parse CidV0 strings. Strings -// starting with "Qm" are considered CidV0 and treated directly -// as B58-encoded multihashes. -func Decode(v string) (Cid, error) { - if len(v) < 2 { - return Undef, ErrCidTooShort - } - - if len(v) == 46 && v[:2] == "Qm" { - hash, err := mh.FromB58String(v) - if err != nil { - return Undef, err - } - - return tryNewCidV0(hash) - } - - _, data, err := mbase.Decode(v) - if err != nil { - return Undef, err - } - - return Cast(data) -} - -// Extract the encoding from a Cid. If Decode on the same string did -// not return an error neither will this function. -func ExtractEncoding(v string) (mbase.Encoding, error) { - if len(v) < 2 { - return -1, ErrCidTooShort - } - - if len(v) == 46 && v[:2] == "Qm" { - return mbase.Base58BTC, nil - } - - encoding := mbase.Encoding(v[0]) - - // check encoding is valid - _, err := mbase.NewEncoder(encoding) - if err != nil { - return -1, err - } - - return encoding, nil -} - -// Cast takes a Cid data slice, parses it and returns a Cid. -// For CidV1, the data buffer is in the form: -// -// -// -// CidV0 are also supported. In particular, data buffers starting -// with length 34 bytes, which starts with bytes [18,32...] are considered -// binary multihashes. -// -// Please use decode when parsing a regular Cid string, as Cast does not -// expect multibase-encoded data. Cast accepts the output of Cid.Bytes(). -func Cast(data []byte) (Cid, error) { - nr, c, err := CidFromBytes(data) - if err != nil { - return Undef, err - } - - if nr != len(data) { - return Undef, fmt.Errorf("trailing bytes in data buffer passed to cid Cast") - } - - return c, nil -} - -// UnmarshalBinary is equivalent to Cast(). It implements the -// encoding.BinaryUnmarshaler interface. -func (c *Cid) UnmarshalBinary(data []byte) error { - casted, err := Cast(data) - if err != nil { - return err - } - c.str = casted.str - return nil -} - -// UnmarshalText is equivalent to Decode(). It implements the -// encoding.TextUnmarshaler interface. -func (c *Cid) UnmarshalText(text []byte) error { - decodedCid, err := Decode(string(text)) - if err != nil { - return err - } - c.str = decodedCid.str - return nil -} - -// Version returns the Cid version. -func (c Cid) Version() uint64 { - if len(c.str) == 34 && c.str[0] == 18 && c.str[1] == 32 { - return 0 - } - return 1 -} - -// Type returns the multicodec-packed content type of a Cid. 
-func (c Cid) Type() uint64 { - if c.Version() == 0 { - return DagProtobuf - } - _, n, _ := uvarint(c.str) - codec, _, _ := uvarint(c.str[n:]) - return codec -} - -// String returns the default string representation of a -// Cid. Currently, Base32 is used for CIDV1 as the encoding for the -// multibase string, Base58 is used for CIDV0. -func (c Cid) String() string { - switch c.Version() { - case 0: - return c.Hash().B58String() - case 1: - mbstr, err := mbase.Encode(mbase.Base32, c.Bytes()) - if err != nil { - panic("should not error with hardcoded mbase: " + err.Error()) - } - - return mbstr - default: - panic("not possible to reach this point") - } -} - -// String returns the string representation of a Cid -// encoded is selected base -func (c Cid) StringOfBase(base mbase.Encoding) (string, error) { - switch c.Version() { - case 0: - if base != mbase.Base58BTC { - return "", ErrInvalidEncoding - } - return c.Hash().B58String(), nil - case 1: - return mbase.Encode(base, c.Bytes()) - default: - panic("not possible to reach this point") - } -} - -// Encode return the string representation of a Cid in a given base -// when applicable. Version 0 Cid's are always in Base58 as they do -// not take a multibase prefix. -func (c Cid) Encode(base mbase.Encoder) string { - switch c.Version() { - case 0: - return c.Hash().B58String() - case 1: - return base.Encode(c.Bytes()) - default: - panic("not possible to reach this point") - } -} - -// Hash returns the multihash contained by a Cid. -func (c Cid) Hash() mh.Multihash { - bytes := c.Bytes() - - if c.Version() == 0 { - return mh.Multihash(bytes) - } - - // skip version length - _, n1, _ := varint.FromUvarint(bytes) - // skip codec length - _, n2, _ := varint.FromUvarint(bytes[n1:]) - - return mh.Multihash(bytes[n1+n2:]) -} - -// Bytes returns the byte representation of a Cid. -// The output of bytes can be parsed back into a Cid -// with Cast(). -func (c Cid) Bytes() []byte { - return []byte(c.str) -} - -// ByteLen returns the length of the CID in bytes. -// It's equivalent to `len(c.Bytes())`, but works without an allocation, -// and should therefore be preferred. -// -// (See also the WriteTo method for other important operations that work without allocation.) -func (c Cid) ByteLen() int { - return len(c.str) -} - -// WriteBytes writes the CID bytes to the given writer. -// This method works without incurring any allocation. -// -// (See also the ByteLen method for other important operations that work without allocation.) -func (c Cid) WriteBytes(w io.Writer) (int, error) { - n, err := io.WriteString(w, c.str) - if err != nil { - return n, err - } - if n != len(c.str) { - return n, fmt.Errorf("failed to write entire cid string") - } - return n, nil -} - -// MarshalBinary is equivalent to Bytes(). It implements the -// encoding.BinaryMarshaler interface. -func (c Cid) MarshalBinary() ([]byte, error) { - return c.Bytes(), nil -} - -// MarshalText is equivalent to String(). It implements the -// encoding.TextMarshaler interface. -func (c Cid) MarshalText() ([]byte, error) { - return []byte(c.String()), nil -} - -// Equals checks that two Cids are the same. -// In order for two Cids to be considered equal, the -// Version, the Codec and the Multihash must match. -func (c Cid) Equals(o Cid) bool { - return c == o -} - -// UnmarshalJSON parses the JSON representation of a Cid. 
-func (c *Cid) UnmarshalJSON(b []byte) error { - if len(b) < 2 { - return fmt.Errorf("invalid cid json blob") - } - obj := struct { - CidTarget string `json:"/"` - }{} - objptr := &obj - err := json.Unmarshal(b, &objptr) - if err != nil { - return err - } - if objptr == nil { - *c = Cid{} - return nil - } - - if obj.CidTarget == "" { - return fmt.Errorf("cid was incorrectly formatted") - } - - out, err := Decode(obj.CidTarget) - if err != nil { - return err - } - - *c = out - - return nil -} - -// MarshalJSON procudes a JSON representation of a Cid, which looks as follows: -// -// { "/": "" } -// -// Note that this formatting comes from the IPLD specification -// (https://github.com/ipld/specs/tree/master/ipld) -func (c Cid) MarshalJSON() ([]byte, error) { - if !c.Defined() { - return []byte("null"), nil - } - return []byte(fmt.Sprintf("{\"/\":\"%s\"}", c.String())), nil -} - -// KeyString returns the binary representation of the Cid as a string -func (c Cid) KeyString() string { - return c.str -} - -// Loggable returns a Loggable (as defined by -// https://godoc.org/github.com/ipfs/go-log). -func (c Cid) Loggable() map[string]interface{} { - return map[string]interface{}{ - "cid": c, - } -} - -// Prefix builds and returns a Prefix out of a Cid. -func (c Cid) Prefix() Prefix { - if c.Version() == 0 { - return Prefix{ - MhType: mh.SHA2_256, - MhLength: 32, - Version: 0, - Codec: DagProtobuf, - } - } - - offset := 0 - version, n, _ := uvarint(c.str[offset:]) - offset += n - codec, n, _ := uvarint(c.str[offset:]) - offset += n - mhtype, n, _ := uvarint(c.str[offset:]) - offset += n - mhlen, _, _ := uvarint(c.str[offset:]) - - return Prefix{ - MhType: mhtype, - MhLength: int(mhlen), - Version: version, - Codec: codec, - } -} - -// Prefix represents all the metadata of a Cid, -// that is, the Version, the Codec, the Multihash type -// and the Multihash length. It does not contains -// any actual content information. -// NOTE: The use -1 in MhLength to mean default length is deprecated, -// use the V0Builder or V1Builder structures instead -type Prefix struct { - Version uint64 - Codec uint64 - MhType uint64 - MhLength int -} - -// Sum uses the information in a prefix to perform a multihash.Sum() -// and return a newly constructed Cid with the resulting multihash. -func (p Prefix) Sum(data []byte) (Cid, error) { - length := p.MhLength - if p.MhType == mh.IDENTITY { - length = -1 - } - - if p.Version == 0 && (p.MhType != mh.SHA2_256 || - (p.MhLength != 32 && p.MhLength != -1)) { - - return Undef, fmt.Errorf("invalid v0 prefix") - } - - hash, err := mh.Sum(data, p.MhType, length) - if err != nil { - return Undef, err - } - - switch p.Version { - case 0: - return NewCidV0(hash), nil - case 1: - return NewCidV1(p.Codec, hash), nil - default: - return Undef, fmt.Errorf("invalid cid version") - } -} - -// Bytes returns a byte representation of a Prefix. It looks like: -// -// -func (p Prefix) Bytes() []byte { - size := varint.UvarintSize(p.Version) - size += varint.UvarintSize(p.Codec) - size += varint.UvarintSize(p.MhType) - size += varint.UvarintSize(uint64(p.MhLength)) - - buf := make([]byte, size) - n := varint.PutUvarint(buf, p.Version) - n += varint.PutUvarint(buf[n:], p.Codec) - n += varint.PutUvarint(buf[n:], p.MhType) - n += varint.PutUvarint(buf[n:], uint64(p.MhLength)) - if n != size { - panic("size mismatch") - } - return buf -} - -// PrefixFromBytes parses a Prefix-byte representation onto a -// Prefix. 
-func PrefixFromBytes(buf []byte) (Prefix, error) { - r := bytes.NewReader(buf) - vers, err := varint.ReadUvarint(r) - if err != nil { - return Prefix{}, err - } - - codec, err := varint.ReadUvarint(r) - if err != nil { - return Prefix{}, err - } - - mhtype, err := varint.ReadUvarint(r) - if err != nil { - return Prefix{}, err - } - - mhlen, err := varint.ReadUvarint(r) - if err != nil { - return Prefix{}, err - } - - return Prefix{ - Version: vers, - Codec: codec, - MhType: mhtype, - MhLength: int(mhlen), - }, nil -} - -func CidFromBytes(data []byte) (int, Cid, error) { - if len(data) > 2 && data[0] == mh.SHA2_256 && data[1] == 32 { - if len(data) < 34 { - return 0, Undef, fmt.Errorf("not enough bytes for cid v0") - } - - h, err := mh.Cast(data[:34]) - if err != nil { - return 0, Undef, err - } - - return 34, Cid{string(h)}, nil - } - - vers, n, err := varint.FromUvarint(data) - if err != nil { - return 0, Undef, err - } - - if vers != 1 { - return 0, Undef, fmt.Errorf("expected 1 as the cid version number, got: %d", vers) - } - - _, cn, err := varint.FromUvarint(data[n:]) - if err != nil { - return 0, Undef, err - } - - mhnr, _, err := mh.MHFromBytes(data[n+cn:]) - if err != nil { - return 0, Undef, err - } - - l := n + cn + mhnr - - return l, Cid{string(data[0:l])}, nil -} - -func toBufByteReader(r io.Reader, dst []byte) *bufByteReader { - // If the reader already implements ByteReader, use it directly. - // Otherwise, use a fallback that does 1-byte Reads. - if br, ok := r.(io.ByteReader); ok { - return &bufByteReader{direct: br, dst: dst} - } - return &bufByteReader{fallback: r, dst: dst} -} - -type bufByteReader struct { - direct io.ByteReader - fallback io.Reader - - dst []byte -} - -func (r *bufByteReader) ReadByte() (byte, error) { - // The underlying reader has ReadByte; use it. - if br := r.direct; br != nil { - b, err := br.ReadByte() - if err != nil { - return 0, err - } - r.dst = append(r.dst, b) - return b, nil - } - - // Fall back to a one-byte Read. - // TODO: consider reading straight into dst, - // once we have benchmarks and if they prove that to be faster. - var p [1]byte - if _, err := io.ReadFull(r.fallback, p[:]); err != nil { - return 0, err - } - r.dst = append(r.dst, p[0]) - return p[0], nil -} - -// CidFromReader reads a precise number of bytes for a CID from a given reader. -// It returns the number of bytes read, the CID, and any error encountered. -// The number of bytes read is accurate even if a non-nil error is returned. -// -// It's recommended to supply a reader that buffers and implements io.ByteReader, -// as CidFromReader has to do many single-byte reads to decode varints. -// If the argument only implements io.Reader, single-byte Read calls are used instead. -func CidFromReader(r io.Reader) (int, Cid, error) { - // 64 bytes is enough for any CIDv0, - // and it's enough for most CIDv1s in practice. - // If the digest is too long, we'll allocate more. - br := toBufByteReader(r, make([]byte, 0, 64)) - - // We read the first varint, to tell if this is a CIDv0 or a CIDv1. - // The varint package wants a io.ByteReader, so we must wrap our io.Reader. - vers, err := varint.ReadUvarint(br) - if err != nil { - return len(br.dst), Undef, err - } - - // If we have a CIDv0, read the rest of the bytes and cast the buffer. 
- if vers == mh.SHA2_256 { - if n, err := io.ReadFull(r, br.dst[1:34]); err != nil { - return len(br.dst) + n, Undef, err - } - - br.dst = br.dst[:34] - h, err := mh.Cast(br.dst) - if err != nil { - return len(br.dst), Undef, err - } - - return len(br.dst), Cid{string(h)}, nil - } - - if vers != 1 { - return len(br.dst), Undef, fmt.Errorf("expected 1 as the cid version number, got: %d", vers) - } - - // CID block encoding multicodec. - _, err = varint.ReadUvarint(br) - if err != nil { - return len(br.dst), Undef, err - } - - // We could replace most of the code below with go-multihash's ReadMultihash. - // Note that it would save code, but prevent reusing buffers. - // Plus, we already have a ByteReader now. - mhStart := len(br.dst) - - // Multihash hash function code. - _, err = varint.ReadUvarint(br) - if err != nil { - return len(br.dst), Undef, err - } - - // Multihash digest length. - mhl, err := varint.ReadUvarint(br) - if err != nil { - return len(br.dst), Undef, err - } - - // Refuse to make large allocations to prevent OOMs due to bugs. - const maxDigestAlloc = 32 << 20 // 32MiB - if mhl > maxDigestAlloc { - return len(br.dst), Undef, fmt.Errorf("refusing to allocate %d bytes for a digest", mhl) - } - - // Fine to convert mhl to int, given maxDigestAlloc. - prefixLength := len(br.dst) - cidLength := prefixLength + int(mhl) - if cidLength > cap(br.dst) { - // If the multihash digest doesn't fit in our initial 64 bytes, - // efficiently extend the slice via append+make. - br.dst = append(br.dst, make([]byte, cidLength-len(br.dst))...) - } else { - // The multihash digest fits inside our buffer, - // so just extend its capacity. - br.dst = br.dst[:cidLength] - } - - if n, err := io.ReadFull(r, br.dst[prefixLength:cidLength]); err != nil { - // We can't use len(br.dst) here, - // as we've only read n bytes past prefixLength. - return prefixLength + n, Undef, err - } - - // This simply ensures the multihash is valid. - // TODO: consider removing this bit, as it's probably redundant; - // for now, it helps ensure consistency with CidFromBytes. 
- _, _, err = mh.MHFromBytes(br.dst[mhStart:]) - if err != nil { - return len(br.dst), Undef, err - } - - return len(br.dst), Cid{string(br.dst)}, nil -} diff --git a/vendor/github.com/ipfs/go-cid/cid_fuzz.go b/vendor/github.com/ipfs/go-cid/cid_fuzz.go deleted file mode 100644 index 0b0408c..0000000 --- a/vendor/github.com/ipfs/go-cid/cid_fuzz.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package cid - -func Fuzz(data []byte) int { - cid, err := Cast(data) - if err != nil { - return 0 - } - - _ = cid.Bytes() - _ = cid.String() - p := cid.Prefix() - _ = p.Bytes() - - if !cid.Equals(cid) { - panic("inequality") - } - - // json loop - json, err := cid.MarshalJSON() - if err != nil { - panic(err.Error()) - } - cid2 := Cid{} - err = cid2.UnmarshalJSON(json) - if err != nil { - panic(err.Error()) - } - - if !cid.Equals(cid2) { - panic("json loop not equal") - } - - return 1 -} diff --git a/vendor/github.com/ipfs/go-cid/codecov.yml b/vendor/github.com/ipfs/go-cid/codecov.yml deleted file mode 100644 index 5f88a9e..0000000 --- a/vendor/github.com/ipfs/go-cid/codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -coverage: - range: "50...100" -comment: off diff --git a/vendor/github.com/ipfs/go-cid/deprecated.go b/vendor/github.com/ipfs/go-cid/deprecated.go deleted file mode 100644 index cd889f9..0000000 --- a/vendor/github.com/ipfs/go-cid/deprecated.go +++ /dev/null @@ -1,28 +0,0 @@ -package cid - -import ( - mh "github.com/multiformats/go-multihash" -) - -// NewPrefixV0 returns a CIDv0 prefix with the specified multihash type. -// DEPRECATED: Use V0Builder -func NewPrefixV0(mhType uint64) Prefix { - return Prefix{ - MhType: mhType, - MhLength: mh.DefaultLengths[mhType], - Version: 0, - Codec: DagProtobuf, - } -} - -// NewPrefixV1 returns a CIDv1 prefix with the specified codec and multihash -// type. -// DEPRECATED: Use V1Builder -func NewPrefixV1(codecType uint64, mhType uint64) Prefix { - return Prefix{ - MhType: mhType, - MhLength: mh.DefaultLengths[mhType], - Version: 1, - Codec: codecType, - } -} diff --git a/vendor/github.com/ipfs/go-cid/set.go b/vendor/github.com/ipfs/go-cid/set.go deleted file mode 100644 index eb3b3f0..0000000 --- a/vendor/github.com/ipfs/go-cid/set.go +++ /dev/null @@ -1,65 +0,0 @@ -package cid - -// Set is a implementation of a set of Cids, that is, a structure -// to which holds a single copy of every Cids that is added to it. -type Set struct { - set map[Cid]struct{} -} - -// NewSet initializes and returns a new Set. -func NewSet() *Set { - return &Set{set: make(map[Cid]struct{})} -} - -// Add puts a Cid in the Set. -func (s *Set) Add(c Cid) { - s.set[c] = struct{}{} -} - -// Has returns if the Set contains a given Cid. -func (s *Set) Has(c Cid) bool { - _, ok := s.set[c] - return ok -} - -// Remove deletes a Cid from the Set. -func (s *Set) Remove(c Cid) { - delete(s.set, c) -} - -// Len returns how many elements the Set has. -func (s *Set) Len() int { - return len(s.set) -} - -// Keys returns the Cids in the set. -func (s *Set) Keys() []Cid { - out := make([]Cid, 0, len(s.set)) - for k := range s.set { - out = append(out, k) - } - return out -} - -// Visit adds a Cid to the set only if it is -// not in it already. -func (s *Set) Visit(c Cid) bool { - if !s.Has(c) { - s.Add(c) - return true - } - - return false -} - -// ForEach allows to run a custom function on each -// Cid in the set. 
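A hedged sketch of the usual de-duplication pattern built on Set and Visit; the dedupe helper and sample CID are made up for illustration:

```go
package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// dedupe returns the unique CIDs from in, preserving first-seen order.
func dedupe(in []cid.Cid) []cid.Cid {
	seen := cid.NewSet()
	out := make([]cid.Cid, 0, len(in))
	for _, c := range in {
		// Visit adds c and reports true only the first time it is seen.
		if seen.Visit(c) {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	h, _ := mh.Sum([]byte("x"), mh.SHA2_256, -1)
	a := cid.NewCidV1(cid.Raw, h)
	fmt.Println(len(dedupe([]cid.Cid{a, a, a}))) // 1
}
```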
-func (s *Set) ForEach(f func(c Cid) error) error { - for c := range s.set { - err := f(c) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/ipfs/go-cid/varint.go b/vendor/github.com/ipfs/go-cid/varint.go deleted file mode 100644 index e25c843..0000000 --- a/vendor/github.com/ipfs/go-cid/varint.go +++ /dev/null @@ -1,37 +0,0 @@ -package cid - -import ( - "github.com/multiformats/go-varint" -) - -// Version of varint function that works with a string rather than -// []byte to avoid unnecessary allocation - -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license as given at https://golang.org/LICENSE - -// uvarint decodes a uint64 from buf and returns that value and the -// number of bytes read (> 0). If an error occurred, then 0 is -// returned for both the value and the number of bytes read, and an -// error is returned. -func uvarint(buf string) (uint64, int, error) { - var x uint64 - var s uint - // we have a binary string so we can't use a range loop - for i := 0; i < len(buf); i++ { - b := buf[i] - if b < 0x80 { - if i > 9 || i == 9 && b > 1 { - return 0, 0, varint.ErrOverflow - } - if b == 0 && i > 0 { - return 0, 0, varint.ErrNotMinimal - } - return x | uint64(b)< An implementation of a cbor encoded merkledag object. - -## Status - -This library **has alternatives available**: For new projects, prefer using the [cbor codec](https://github.com/ipld/go-ipld-prime/tree/master/codec/dagcbor) included with [go-ipld-prime](https://github.com/ipld/go-ipld-prime). - -This library is in **standby** mode. It works, but we recommend migrating to alternatives if possible. New features are unlikely to be added here. - -## Lead Maintainer - -[Eric Myhre](https://github.com/warpfork) - -## Table of Contents - -- [Install](#install) -- [Usage](#usage) -- [API](#api) -- [Contribute](#contribute) -- [License](#license) - -## Install - -```sh -make install -``` - -## Usage - -Note: This package isn't the easiest to use. -```go -// Make an object -obj := map[interface{}]interface{}{ - "foo": "bar", - "baz": &Link{ - Target: myCid, - }, -} - -// Parse it into an ipldcbor node -nd, err := WrapMap(obj) - -fmt.Println(nd.Links()) - -``` - -## Contribute - -PRs are welcome! - -Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. - -## License - -MIT © Jeromy Johnson diff --git a/vendor/github.com/ipfs/go-ipld-cbor/codecov.yml b/vendor/github.com/ipfs/go-ipld-cbor/codecov.yml deleted file mode 100644 index 5f88a9e..0000000 --- a/vendor/github.com/ipfs/go-ipld-cbor/codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -coverage: - range: "50...100" -comment: off diff --git a/vendor/github.com/ipfs/go-ipld-cbor/store.go b/vendor/github.com/ipfs/go-ipld-cbor/store.go deleted file mode 100644 index 3b999c6..0000000 --- a/vendor/github.com/ipfs/go-ipld-cbor/store.go +++ /dev/null @@ -1,181 +0,0 @@ -package cbornode - -import ( - "bytes" - "context" - "fmt" - - block "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" - atlas "github.com/polydawn/refmt/obj/atlas" - cbg "github.com/whyrusleeping/cbor-gen" -) - -// IpldStore wraps a Blockstore and provides an interface for storing and retrieving CBOR encoded data. 
-type IpldStore interface { - Get(ctx context.Context, c cid.Cid, out interface{}) error - Put(ctx context.Context, v interface{}) (cid.Cid, error) -} - -// IpldBlockstore defines a subset of the go-ipfs-blockstore Blockstore interface providing methods -// for storing and retrieving block-centered data. -type IpldBlockstore interface { - Get(context.Context, cid.Cid) (block.Block, error) - Put(context.Context, block.Block) error -} - -// IpldBlockstoreViewer is a trait that enables zero-copy access to blocks in -// a blockstore. -type IpldBlockstoreViewer interface { - // View provides zero-copy access to blocks in a blockstore. The callback - // function will be invoked with the value for the key. The user MUST not - // modify the byte array, as it could be memory-mapped. - View(cid.Cid, func([]byte) error) error -} - -// BasicIpldStore wraps and IpldBlockstore and implements the IpldStore interface. -type BasicIpldStore struct { - Blocks IpldBlockstore - Viewer IpldBlockstoreViewer - - Atlas *atlas.Atlas -} - -var _ IpldStore = &BasicIpldStore{} - -// NewCborStore returns an IpldStore implementation backed by the provided IpldBlockstore. -func NewCborStore(bs IpldBlockstore) *BasicIpldStore { - viewer, _ := bs.(IpldBlockstoreViewer) - return &BasicIpldStore{Blocks: bs, Viewer: viewer} -} - -// Get reads and unmarshals the content at `c` into `out`. -func (s *BasicIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { - if s.Viewer != nil { - // zero-copy path. - return s.Viewer.View(c, func(b []byte) error { - return s.decode(b, out) - }) - } - - blk, err := s.Blocks.Get(ctx, c) - if err != nil { - return err - } - return s.decode(blk.RawData(), out) -} - -func (s *BasicIpldStore) decode(b []byte, out interface{}) error { - cu, ok := out.(cbg.CBORUnmarshaler) - if ok { - if err := cu.UnmarshalCBOR(bytes.NewReader(b)); err != nil { - return NewSerializationError(err) - } - return nil - } - panic("should never come here") -} - -type cidProvider interface { - Cid() cid.Cid -} - -// Put marshals and writes content `v` to the backing blockstore returning its CID. 
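A rough end-to-end sketch of the Get/Put contract using NewMemCborStore; the Greeting type and its deliberately minimal hand-written CBOR methods (short text strings only) are illustrative stand-ins for cbor-gen generated marshalers:

```go
package main

import (
	"context"
	"fmt"
	"io"

	cbornode "github.com/ipfs/go-ipld-cbor"
)

// Greeting hand-rolls just enough CBOR (one short text string, major type 3,
// length < 24) to satisfy the CBORMarshaler/CBORUnmarshaler interfaces the
// store expects.
type Greeting struct {
	Msg string
}

func (g *Greeting) MarshalCBOR(w io.Writer) error {
	if len(g.Msg) >= 24 {
		return fmt.Errorf("demo marshaler only handles short strings")
	}
	// Header byte 0x60+len, followed by the UTF-8 bytes.
	_, err := w.Write(append([]byte{byte(0x60 + len(g.Msg))}, g.Msg...))
	return err
}

func (g *Greeting) UnmarshalCBOR(r io.Reader) error {
	var hdr [1]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return err
	}
	if hdr[0] < 0x60 || hdr[0] >= 0x78 {
		return fmt.Errorf("expected a short CBOR text string")
	}
	body := make([]byte, hdr[0]-0x60)
	if _, err := io.ReadFull(r, body); err != nil {
		return err
	}
	g.Msg = string(body)
	return nil
}

func main() {
	ctx := context.Background()
	store := cbornode.NewMemCborStore()

	// Put marshals the value, hashes the bytes and stores the block.
	c, err := store.Put(ctx, &Greeting{Msg: "hello"})
	if err != nil {
		panic(err)
	}

	// Get fetches the block by CID and unmarshals it back into out.
	var out Greeting
	if err := store.Get(ctx, c, &out); err != nil {
		panic(err)
	}
	fmt.Println(c, out.Msg)
}
```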
-func (s *BasicIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { - mhType := uint64(mh.BLAKE2B_MIN + 31) - mhLen := -1 - codec := uint64(cid.DagCBOR) - - var expCid cid.Cid - if c, ok := v.(cidProvider); ok { - expCid := c.Cid() - pref := expCid.Prefix() - mhType = pref.MhType - mhLen = pref.MhLength - codec = pref.Codec - } - - cm, ok := v.(cbg.CBORMarshaler) - if ok { - buf := new(bytes.Buffer) - if err := cm.MarshalCBOR(buf); err != nil { - return cid.Undef, NewSerializationError(err) - } - - pref := cid.Prefix{ - Codec: codec, - MhType: mhType, - MhLength: mhLen, - Version: 1, - } - c, err := pref.Sum(buf.Bytes()) - if err != nil { - return cid.Undef, err - } - - blk, err := block.NewBlockWithCid(buf.Bytes(), c) - if err != nil { - return cid.Undef, err - } - - if err := s.Blocks.Put(ctx, blk); err != nil { - return cid.Undef, err - } - - blkCid := blk.Cid() - if expCid != cid.Undef && blkCid != expCid { - return cid.Undef, fmt.Errorf("your object is not being serialized the way it expects to") - } - - return blkCid, nil - } - - return cid.Undef, fmt.Errorf("unsupport get object through reflect in fvm, tinygo unable to build it") -} - -func NewSerializationError(err error) error { - return SerializationError{err} -} - -type SerializationError struct { - err error -} - -func (se SerializationError) Error() string { - return se.err.Error() -} - -func (se SerializationError) Unwrap() error { - return se.err -} - -func (se SerializationError) Is(o error) bool { - _, ok := o.(*SerializationError) - return ok -} - -func NewMemCborStore() IpldStore { - return NewCborStore(newMockBlocks()) -} - -type mockBlocks struct { - data map[cid.Cid]block.Block -} - -func newMockBlocks() *mockBlocks { - return &mockBlocks{make(map[cid.Cid]block.Block)} -} - -func (mb *mockBlocks) Get(ctx context.Context, c cid.Cid) (block.Block, error) { - d, ok := mb.data[c] - if ok { - return d, nil - } - return nil, fmt.Errorf("not found %s", c) -} - -func (mb *mockBlocks) Put(ctx context.Context, b block.Block) error { - mb.data[b.Cid()] = b - return nil -} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/version.json b/vendor/github.com/ipfs/go-ipld-cbor/version.json deleted file mode 100644 index b9ab021..0000000 --- a/vendor/github.com/ipfs/go-ipld-cbor/version.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "version": "v0.0.5" -} diff --git a/vendor/github.com/klauspost/cpuid/v2/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml deleted file mode 100644 index 944cc00..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml +++ /dev/null @@ -1,74 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com - -builds: - - - id: "cpuid" - binary: cpuid - main: ./cmd/cpuid/main.go - env: - - CGO_ENABLED=0 - flags: - - -ldflags=-s -w - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm64 - goarm: - - 7 - -archives: - - - id: cpuid - name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD - format_overrides: - - goos: windows - format: zip - files: - - LICENSE -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/cpuid - maintainer: Klaus Post - description: CPUID Tool - license: BSD 3-Clause - formats: - - deb - - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt deleted file mode 100644 index 2ef4714..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt +++ /dev/null @@ -1,35 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2015- Klaus Post & Contributors. -Email: klauspost@gmail.com - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. 
diff --git a/vendor/github.com/klauspost/cpuid/v2/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE deleted file mode 100644 index 5cec7ee..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md deleted file mode 100644 index ea7df3d..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ /dev/null @@ -1,258 +0,0 @@ -# cpuid -Package cpuid provides information about the CPU running the current program. - -CPU features are detected on startup, and kept for fast access through the life of the application. -Currently x86 / x64 (AMD64/i386) and ARM (ARM64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. - -You can access the CPU information by accessing the shared CPU variable of the cpuid library. - -Package home: https://github.com/klauspost/cpuid - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2) -[![Build Status][3]][4] - -[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master -[4]: https://travis-ci.org/klauspost/cpuid - -## installing - -`go get -u github.com/klauspost/cpuid/v2` using modules. - -Drop `v2` for others. - -## example - -```Go -package main - -import ( - "fmt" - "strings" - - . 
"github.com/klauspost/cpuid/v2" -) - -func main() { - // Print basic CPU information: - fmt.Println("Name:", CPU.BrandName) - fmt.Println("PhysicalCores:", CPU.PhysicalCores) - fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) - fmt.Println("LogicalCores:", CPU.LogicalCores) - fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID) - fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ",")) - fmt.Println("Cacheline bytes:", CPU.CacheLine) - fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes") - fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") - fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes") - fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes") - fmt.Println("Frequency", CPU.Hz, "hz") - - // Test if we have these specific features: - if CPU.Supports(SSE, SSE2) { - fmt.Println("We have Streaming SIMD 2 Extensions") - } -} -``` - -Sample output: -``` ->go run main.go -Name: AMD Ryzen 9 3950X 16-Core Processor -PhysicalCores: 16 -ThreadsPerCore: 2 -LogicalCores: 32 -Family 23 Model: 113 Vendor ID: AMD -Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3 -Cacheline bytes: 64 -L1 Data Cache: 32768 bytes -L1 Instruction Cache: 32768 bytes -L2 Cache: 524288 bytes -L3 Cache: 16777216 bytes -Frequency 0 hz -We have Streaming SIMD 2 Extensions -``` - -# usage - -The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features. -A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler. - -Note that for some cpu/os combinations some features will not be detected. -`amd64` has rather good support and should work reliably on all platforms. - -Note that hypervisors may not pass through all CPU features. - -## arm64 feature detection - -Not all operating systems provide ARM features directly -and there is no safe way to do so for the rest. - -Currently `arm64/linux` and `arm64/freebsd` should be quite reliable. -`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected. - -A `DetectARM()` can be used if you are able to control your deployment, -it will detect CPU features, but may crash if the OS doesn't intercept the calls. -A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below. - -Note that currently only features are detected on ARM, -no additional information is currently available. - -## flags - -It is possible to add flags that affects cpu detection. - -For this the `Flags()` command is provided. - -This must be called *before* `flag.Parse()` AND after the flags have been parsed `Detect()` must be called. - -This means that any detection used in `init()` functions will not contain these flags. 
- -Example: - -```Go -package main - -import ( - "flag" - "fmt" - "strings" - - "github.com/klauspost/cpuid/v2" -) - -func main() { - cpuid.Flags() - flag.Parse() - cpuid.Detect() - - // Test if we have these specific features: - if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) { - fmt.Println("We have Streaming SIMD 2 Extensions") - } -} -``` - -## commandline - -Download as binary from: https://github.com/klauspost/cpuid/releases - -Install from source: - -`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` - -### Example - -``` -λ cpuid -Name: AMD Ryzen 9 3950X 16-Core Processor -Vendor String: AuthenticAMD -Vendor ID: AMD -PhysicalCores: 16 -Threads Per Core: 2 -Logical Cores: 32 -CPU Family 23 Model: 113 -Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE -Microarchitecture level: 3 -Cacheline bytes: 64 -L1 Instruction Cache: 32768 bytes -L1 Data Cache: 32768 bytes -L2 Cache: 524288 bytes -L3 Cache: 16777216 bytes - -``` -### JSON Output: - -``` -λ cpuid --json -{ - "BrandName": "AMD Ryzen 9 3950X 16-Core Processor", - "VendorID": 2, - "VendorString": "AuthenticAMD", - "PhysicalCores": 16, - "ThreadsPerCore": 2, - "LogicalCores": 32, - "Family": 23, - "Model": 113, - "CacheLine": 64, - "Hz": 0, - "BoostFreq": 0, - "Cache": { - "L1I": 32768, - "L1D": 32768, - "L2": 524288, - "L3": 16777216 - }, - "SGX": { - "Available": false, - "LaunchControl": false, - "SGX1Supported": false, - "SGX2Supported": false, - "MaxEnclaveSizeNot64": 0, - "MaxEnclaveSize64": 0, - "EPCSections": null - }, - "Features": [ - "ADX", - "AESNI", - "AVX", - "AVX2", - "BMI1", - "BMI2", - "CLMUL", - "CLZERO", - "CMOV", - "CMPXCHG8", - "CPBOOST", - "CX16", - "F16C", - "FMA3", - "FXSR", - "FXSROPT", - "HTT", - "HYPERVISOR", - "LAHF", - "LZCNT", - "MCAOVERFLOW", - "MMX", - "MMXEXT", - "MOVBE", - "NX", - "OSXSAVE", - "POPCNT", - "RDRAND", - "RDSEED", - "RDTSCP", - "SCE", - "SHA", - "SSE", - "SSE2", - "SSE3", - "SSE4", - "SSE42", - "SSE4A", - "SSSE3", - "SUCCOR", - "X87", - "XSAVE" - ], - "X64Level": 3 -} -``` - -### Check CPU microarch level - -``` -λ cpuid --check-level=3 -2022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor -2022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3. -Exit Code 0 - -λ cpuid --check-level=4 -2022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor -2022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3. -Exit Code 1 -``` - -# license - -This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go deleted file mode 100644 index 51489be..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ /dev/null @@ -1,1075 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// Package cpuid provides information about the CPU running the current program. -// -// CPU features are detected on startup, and kept for fast access through the life of the application. -// Currently x86 / x64 (AMD64) as well as arm64 is supported. -// -// You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
-// -// Package home: https://github.com/klauspost/cpuid -package cpuid - -import ( - "math" - "runtime" - "strings" -) - -// AMD refererence: https://www.amd.com/system/files/TechDocs/25481.pdf -// and Processor Programming Reference (PPR) - -// Vendor is a representation of a CPU vendor. -type Vendor int - -const ( - VendorUnknown Vendor = iota - Intel - AMD - VIA - Transmeta - NSC - KVM // Kernel-based Virtual Machine - MSVM // Microsoft Hyper-V or Windows Virtual PC - VMware - XenHVM - Bhyve - Hygon - SiS - RDC - - Ampere - ARM - Broadcom - Cavium - DEC - Fujitsu - Infineon - Motorola - NVIDIA - AMCC - Qualcomm - Marvell - - lastVendor -) - -//go:generate stringer -type=FeatureID,Vendor - -// FeatureID is the ID of a specific cpu feature. -type FeatureID int - -const ( - // Keep index -1 as unknown - UNKNOWN = -1 - - // Add features - ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - AESNI // Advanced Encryption Standard New Instructions - AMD3DNOW // AMD 3DNOW - AMD3DNOWEXT // AMD 3DNowExt - AMXBF16 // Tile computational operations on BFLOAT16 numbers - AMXINT8 // Tile computational operations on 8-bit integers - AMXTILE // Tile architecture - AVX // AVX functions - AVX2 // AVX2 functions - AVX512BF16 // AVX-512 BFLOAT16 Instructions - AVX512BITALG // AVX-512 Bit Algorithms - AVX512BW // AVX-512 Byte and Word Instructions - AVX512CD // AVX-512 Conflict Detection Instructions - AVX512DQ // AVX-512 Doubleword and Quadword Instructions - AVX512ER // AVX-512 Exponential and Reciprocal Instructions - AVX512F // AVX-512 Foundation - AVX512FP16 // AVX-512 FP16 Instructions - AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF // AVX-512 Prefetch Instructions - AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions - AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 - AVX512VL // AVX-512 Vector Length Extensions - AVX512VNNI // AVX-512 Vector Neural Network Instructions - AVX512VP2INTERSECT // AVX-512 Intersect for D/Q - AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword - AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one. - BMI1 // Bit Manipulation Instruction Set 1 - BMI2 // Bit Manipulation Instruction Set 2 - CETIBT // Intel CET Indirect Branch Tracking - CETSS // Intel CET Shadow Stack - CLDEMOTE // Cache Line Demote - CLMUL // Carry-less Multiplication - CLZERO // CLZERO instruction supported - CMOV // i686 CMOV - CMPXCHG8 // CMPXCHG8 instruction - CPBOOST // Core Performance Boost - CX16 // CMPXCHG16B Instruction - ENQCMD // Enqueue Command - ERMS // Enhanced REP MOVSB/STOSB - F16C // Half-precision floating-point conversion - FMA3 // Intel FMA 3. Does not imply AVX. - FMA4 // Bulldozer FMA4 functions - FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 - FXSROPT // FXSAVE/FXRSTOR optimizations - GFNI // Galois Field New Instructions - HLE // Hardware Lock Elision - HTT // Hyperthreading (enabled) - HWA // Hardware assert supported. 
Indicates support for MSRC001_10 - HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors - IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) - IBS // Instruction Based Sampling (AMD) - IBSBRNTRGT // Instruction Based Sampling Feature (AMD) - IBSFETCHSAM // Instruction Based Sampling Feature (AMD) - IBSFFV // Instruction Based Sampling Feature (AMD) - IBSOPCNT // Instruction Based Sampling Feature (AMD) - IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) - IBSOPSAM // Instruction Based Sampling Feature (AMD) - IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) - IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) - INT_WBINVD // WBINVD/WBNOINVD are interruptible. - INVLPGB // NVLPGB and TLBSYNC instruction supported - LAHF // LAHF/SAHF in long mode - LZCNT // LZCNT instruction - MCAOVERFLOW // MCA overflow recovery support. - MCOMMIT // MCOMMIT instruction supported - MMX // standard MMX - MMXEXT // SSE integer functions or AMD MMX ext - MOVBE // MOVBE instruction (big-endian) - MOVDIR64B // Move 64 Bytes as Direct Store - MOVDIRI // Move Doubleword as Direct Store - MPX // Intel MPX (Memory Protection Extensions) - MSRIRC // Instruction Retired Counter MSR available - NX // NX (No-Execute) bit - OSXSAVE // XSAVE enabled by OS - POPCNT // POPCNT instruction - RDPRU // RDPRU instruction supported - RDRAND // RDRAND instruction is available - RDSEED // RDSEED instruction is available - RDTSCP // RDTSCP Instruction - RTM // Restricted Transactional Memory - RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. - SCE // SYSENTER and SYSEXIT instructions - SERIALIZE // Serialize Instruction Execution - SGX // Software Guard Extensions - SGXLC // Software Guard Extensions Launch Control - SHA // Intel SHA Extensions - SSE // SSE functions - SSE2 // P4 SSE functions - SSE3 // Prescott SSE3 functions - SSE4 // Penryn SSE4.1 functions - SSE42 // Nehalem SSE4.2 functions - SSE4A // AMD Barcelona microarchitecture SSE4a instructions - SSSE3 // Conroe SSSE3 functions - STIBP // Single Thread Indirect Branch Predictors - SUCCOR // Software uncorrectable error containment and recovery capability. 
- TBM // AMD Trailing Bit Manipulation - TSXLDTRK // Intel TSX Suspend Load Address Tracking - VAES // Vector AES - VMX // Virtual Machine Extensions - VPCLMULQDQ // Carry-Less Multiplication Quadword - WAITPKG // TPAUSE, UMONITOR, UMWAIT - WBNOINVD // Write Back and Do Not Invalidate Cache - X87 // FPU - XOP // Bulldozer XOP functions - XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV - - // ARM features: - AESARM // AES instructions - ARMCPUID // Some CPU ID registers readable at user-level - ASIMD // Advanced SIMD - ASIMDDP // SIMD Dot Product - ASIMDHP // Advanced SIMD half-precision floating point - ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) - ATOMICS // Large System Extensions (LSE) - CRC32 // CRC32/CRC32C instructions - DCPOP // Data cache clean to Point of Persistence (DC CVAP) - EVTSTRM // Generic timer - FCMA // Floatin point complex number addition and multiplication - FP // Single-precision and double-precision floating point - FPHP // Half-precision floating point - GPA // Generic Pointer Authentication - JSCVT // Javascript-style double->int convert (FJCVTZS) - LRCPC // Weaker release consistency (LDAPR, etc) - PMULL // Polynomial Multiply instructions (PMULL/PMULL2) - SHA1 // SHA-1 instructions (SHA1C, etc) - SHA2 // SHA-2 instructions (SHA256H, etc) - SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) - SHA512 // SHA512 instructions - SM3 // SM3 instructions - SM4 // SM4 instructions - SVE // Scalable Vector Extension - - // Keep it last. It automatically defines the size of []flagSet - lastID - - firstID FeatureID = UNKNOWN + 1 -) - -// CPUInfo contains information about the detected system CPU. -type CPUInfo struct { - BrandName string // Brand name reported by the CPU - VendorID Vendor // Comparable CPU vendor ID - VendorString string // Raw vendor string. - featureSet flagSet // Features of the CPU - PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. - ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. - LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. - Family int // CPU family number - Model int // CPU model number - CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. - BoostFreq int64 // Max clock speed, if known, 0 otherwise - Cache struct { - L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected - L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected - L2 int // L2 Cache (per core or shared). Will be -1 if undetected - L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected - } - SGX SGXSupport - maxFunc uint32 - maxExFunc uint32 -} - -var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) -var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) -var xgetbv func(index uint32) (eax, edx uint32) -var rdtscpAsm func() (eax, ebx, ecx, edx uint32) -var darwinHasAVX512 = func() bool { return false } - -// CPU contains information about the CPU as detected on startup, -// or when Detect last was called. -// -// Use this as the primary entry point to you data. -var CPU CPUInfo - -func init() { - //just set default support nothing - CPU.ThreadsPerCore = 1 - CPU.Cache.L1I = -1 - CPU.Cache.L1D = -1 - CPU.Cache.L2 = -1 - CPU.Cache.L3 = -1 -} - -// Detect will re-detect current CPU info. 
-// This will replace the content of the exported CPU variable. -// -// Unless you expect the CPU to change while you are running your program -// you should not need to call this function. -// If you call this, you must ensure that no other goroutine is accessing the -// exported CPU variable. -func Detect() { - //do nothing -} - -// DetectARM will detect ARM64 features. -// This is NOT done automatically since it can potentially crash -// if the OS does not handle the command. -// If in the future this can be done safely this function may not -// do anything. -func DetectARM() { - //do nothing -} - -var detectArmFlag *bool -var displayFeats *bool -var disableFlag *string - -// Supports returns whether the CPU supports all of the requested features. -func (c CPUInfo) Supports(ids ...FeatureID) bool { - //support nothing - return false -} - -// Has allows for checking a single feature. -// Should be inlined by the compiler. -func (c CPUInfo) Has(id FeatureID) bool { - //has nothing - return false -} - -// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels -var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2) -var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3) -var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE) -var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL) - -// X64Level returns the microarchitecture level detected on the CPU. -// If features are lacking or non x64 mode, 0 is returned. -// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels -func (c CPUInfo) X64Level() int { - return 0 -} - -// Disable will disable one or several features. -func (c *CPUInfo) Disable(ids ...FeatureID) bool { - //do nothing - return true -} - -// Enable will disable one or several features even if they were undetected. -// This is of course not recommended for obvious reasons. -func (c *CPUInfo) Enable(ids ...FeatureID) bool { - //do nothing - return true -} - -// IsVendor returns true if vendor is recognized as Intel -func (c CPUInfo) IsVendor(v Vendor) bool { - return c.VendorID == v -} - -func (c CPUInfo) FeatureSet() []string { - s := make([]string, 0) - s = append(s, c.featureSet.Strings()...) - return s -} - -// RTCounter returns the 64-bit time-stamp counter -// Uses the RDTSCP instruction. The value 0 is returned -// if the CPU does not support the instruction. -func (c CPUInfo) RTCounter() uint64 { - if !c.Supports(RDTSCP) { - return 0 - } - a, _, _, d := rdtscpAsm() - return uint64(a) | (uint64(d) << 32) -} - -// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. -// This variable is OS dependent, but on Linux contains information -// about the current cpu/core the code is running on. -// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. -func (c CPUInfo) Ia32TscAux() uint32 { - if !c.Supports(RDTSCP) { - return 0 - } - _, _, ecx, _ := rdtscpAsm() - return ecx -} - -// LogicalCPU will return the Logical CPU the code is currently executing on. -// This is likely to change when the OS re-schedules the running thread -// to another CPU. -// If the current core cannot be detected, -1 will be returned. 
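A small sketch of the feature-gating pattern Has is meant for (selecting an implementation once at startup). Note that in this trimmed vendored copy detection is stubbed out, so Has always reports false; against the upstream library it reflects the host CPU:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

// popcountImpl is chosen once at startup; Has is cheap enough that the
// branch could also live directly on a hot path.
var popcountImpl = func() string {
	if cpuid.CPU.Has(cpuid.POPCNT) {
		return "hardware POPCNT"
	}
	return "portable fallback"
}()

func main() {
	fmt.Println("using:", popcountImpl)
}
```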
-func (c CPUInfo) LogicalCPU() int { - return 1 -} - -// frequencies tries to compute the clock speed of the CPU. If leaf 15 is -// supported, use it, otherwise parse the brand string. Yes, really. -func (c *CPUInfo) frequencies() { - c.Hz, c.BoostFreq = 0, 0 - mfi := maxFunctionID() - if mfi >= 0x15 { - eax, ebx, ecx, _ := cpuid(0x15) - if eax != 0 && ebx != 0 && ecx != 0 { - c.Hz = (int64(ecx) * int64(ebx)) / int64(eax) - } - } - if mfi >= 0x16 { - a, b, _, _ := cpuid(0x16) - // Base... - if a&0xffff > 0 { - c.Hz = int64(a&0xffff) * 1_000_000 - } - // Boost... - if b&0xffff > 0 { - c.BoostFreq = int64(b&0xffff) * 1_000_000 - } - } - if c.Hz > 0 { - return - } - - // computeHz determines the official rated speed of a CPU from its brand - // string. This insanity is *actually the official documented way to do - // this according to Intel*, prior to leaf 0x15 existing. The official - // documentation only shows this working for exactly `x.xx` or `xxxx` - // cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other - // sizes. - model := c.BrandName - hz := strings.LastIndex(model, "Hz") - if hz < 3 { - return - } - var multiplier int64 - switch model[hz-1] { - case 'M': - multiplier = 1000 * 1000 - case 'G': - multiplier = 1000 * 1000 * 1000 - case 'T': - multiplier = 1000 * 1000 * 1000 * 1000 - } - if multiplier == 0 { - return - } - freq := int64(0) - divisor := int64(0) - decimalShift := int64(1) - var i int - for i = hz - 2; i >= 0 && model[i] != ' '; i-- { - if model[i] >= '0' && model[i] <= '9' { - freq += int64(model[i]-'0') * decimalShift - decimalShift *= 10 - } else if model[i] == '.' { - if divisor != 0 { - return - } - divisor = decimalShift - } else { - return - } - } - // we didn't find a space - if i < 0 { - return - } - if divisor != 0 { - c.Hz = (freq * multiplier) / divisor - return - } - c.Hz = freq * multiplier -} - -// VM Will return true if the cpu id indicates we are in -// a virtual machine. -func (c CPUInfo) VM() bool { - return CPU.featureSet.inSet(HYPERVISOR) -} - -// flags contains detected cpu features and characteristics -type flags uint64 - -// log2(bits_in_uint64) -const flagBitsLog2 = 6 -const flagBits = 1 << flagBitsLog2 -const flagMask = flagBits - 1 - -// flagSet contains detected cpu features and characteristics in an array of flags -type flagSet [(lastID + flagMask) / flagBits]flags - -func (s flagSet) inSet(feat FeatureID) bool { - return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0 -} - -func (s *flagSet) set(feat FeatureID) { - s[feat>>flagBitsLog2] |= 1 << (feat & flagMask) -} - -// setIf will set a feature if boolean is true. -func (s *flagSet) setIf(cond bool, features ...FeatureID) { - if cond { - for _, offset := range features { - s[offset>>flagBitsLog2] |= 1 << (offset & flagMask) - } - } -} - -func (s *flagSet) unset(offset FeatureID) { - bit := flags(1 << (offset & flagMask)) - s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit -} - -// or with another flagset. -func (s *flagSet) or(other flagSet) { - for i, v := range other[:] { - s[i] |= v - } -} - -// hasSet returns whether all features are present. -func (s flagSet) hasSet(other flagSet) bool { - for i, v := range other[:] { - if s[i]&v != v { - return false - } - } - return true -} - -func flagSetWith(feat ...FeatureID) flagSet { - var res flagSet - for _, f := range feat { - res.set(f) - } - return res -} - -// ParseFeature will parse the string and return the ID of the matching feature. -// Will return UNKNOWN if not found. 
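A short sketch of ParseFeature for mapping user-supplied feature names onto IDs; the sample names are arbitrary and the output depends on the host CPU:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// Names are matched case-insensitively against the FeatureID string
	// names, e.g. "avx2" -> cpuid.AVX2.
	for _, name := range []string{"avx2", "sse42", "notafeature"} {
		id := cpuid.ParseFeature(name)
		if id == cpuid.UNKNOWN {
			fmt.Printf("%q is not a known feature\n", name)
			continue
		}
		fmt.Printf("%q detected: %v\n", name, cpuid.CPU.Has(id))
	}
}
```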
-func ParseFeature(s string) FeatureID { - s = strings.ToUpper(s) - for i := firstID; i < lastID; i++ { - if i.String() == s { - return i - } - } - return UNKNOWN -} - -// Strings returns an array of the detected features for FlagsSet. -func (s flagSet) Strings() []string { - if len(s) == 0 { - return []string{""} - } - r := make([]string, 0) - for i := firstID; i < lastID; i++ { - if s.inSet(i) { - r = append(r, i.String()) - } - } - return r -} - -func maxExtendedFunction() uint32 { - eax, _, _, _ := cpuid(0x80000000) - return eax -} - -func maxFunctionID() uint32 { - a, _, _, _ := cpuid(0) - return a -} - -func brandName() string { - if maxExtendedFunction() >= 0x80000004 { - v := make([]uint32, 0, 48) - for i := uint32(0); i < 3; i++ { - a, b, c, d := cpuid(0x80000002 + i) - v = append(v, a, b, c, d) - } - return strings.Trim(string(valAsString(v...)), " ") - } - return "unknown" -} - -func threadsPerCore() int { - mfi := maxFunctionID() - vend, _ := vendorID() - - if mfi < 0x4 || (vend != Intel && vend != AMD) { - return 1 - } - - if mfi < 0xb { - if vend != Intel { - return 1 - } - _, b, _, d := cpuid(1) - if (d & (1 << 28)) != 0 { - // v will contain logical core count - v := (b >> 16) & 255 - if v > 1 { - a4, _, _, _ := cpuid(4) - // physical cores - v2 := (a4 >> 26) + 1 - if v2 > 0 { - return int(v) / int(v2) - } - } - } - return 1 - } - _, b, _, _ := cpuidex(0xb, 0) - if b&0xffff == 0 { - if vend == AMD { - // Workaround for AMD returning 0, assume 2 if >= Zen 2 - // It will be more correct than not. - fam, _ := familyModel() - _, _, _, d := cpuid(1) - if (d&(1<<28)) != 0 && fam >= 23 { - return 2 - } - } - return 1 - } - return int(b & 0xffff) -} - -func logicalCores() int { - mfi := maxFunctionID() - v, _ := vendorID() - switch v { - case Intel: - // Use this on old Intel processors - if mfi < 0xb { - if mfi < 1 { - return 0 - } - // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) - // that can be assigned to logical processors in a physical package. - // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. 
- _, ebx, _, _ := cpuid(1) - logical := (ebx >> 16) & 0xff - return int(logical) - } - _, b, _, _ := cpuidex(0xb, 1) - return int(b & 0xffff) - case AMD, Hygon: - _, b, _, _ := cpuid(1) - return int((b >> 16) & 0xff) - default: - return 0 - } -} - -func familyModel() (int, int) { - if maxFunctionID() < 0x1 { - return 0, 0 - } - eax, _, _, _ := cpuid(1) - family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) - model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) - return int(family), int(model) -} - -func physicalCores() int { - v, _ := vendorID() - switch v { - case Intel: - return logicalCores() / threadsPerCore() - case AMD, Hygon: - lc := logicalCores() - tpc := threadsPerCore() - if lc > 0 && tpc > 0 { - return lc / tpc - } - - // The following is inaccurate on AMD EPYC 7742 64-Core Processor - if maxExtendedFunction() >= 0x80000008 { - _, _, c, _ := cpuid(0x80000008) - if c&0xff > 0 { - return int(c&0xff) + 1 - } - } - } - return 0 -} - -// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID -var vendorMapping = map[string]Vendor{ - "AMDisbetter!": AMD, - "AuthenticAMD": AMD, - "CentaurHauls": VIA, - "GenuineIntel": Intel, - "TransmetaCPU": Transmeta, - "GenuineTMx86": Transmeta, - "Geode by NSC": NSC, - "VIA VIA VIA ": VIA, - "KVMKVMKVMKVM": KVM, - "Microsoft Hv": MSVM, - "VMwareVMware": VMware, - "XenVMMXenVMM": XenHVM, - "bhyve bhyve ": Bhyve, - "HygonGenuine": Hygon, - "Vortex86 SoC": SiS, - "SiS SiS SiS ": SiS, - "RiseRiseRise": SiS, - "Genuine RDC": RDC, -} - -func vendorID() (Vendor, string) { - _, b, c, d := cpuid(0) - v := string(valAsString(b, d, c)) - vend, ok := vendorMapping[v] - if !ok { - return VendorUnknown, v - } - return vend, v -} - -func cacheLine() int { - if maxFunctionID() < 0x1 { - return 0 - } - - _, ebx, _, _ := cpuid(1) - cache := (ebx & 0xff00) >> 5 // cflush size - if cache == 0 && maxExtendedFunction() >= 0x80000006 { - _, _, ecx, _ := cpuid(0x80000006) - cache = ecx & 0xff // cacheline size - } - // TODO: Read from Cache and TLB Information - return int(cache) -} - -func (c *CPUInfo) cacheSize() { - c.Cache.L1D = -1 - c.Cache.L1I = -1 - c.Cache.L2 = -1 - c.Cache.L3 = -1 - vendor, _ := vendorID() - switch vendor { - case Intel: - if maxFunctionID() < 4 { - return - } - c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0 - for i := uint32(0); ; i++ { - eax, ebx, ecx, _ := cpuidex(4, i) - cacheType := eax & 15 - if cacheType == 0 { - break - } - cacheLevel := (eax >> 5) & 7 - coherency := int(ebx&0xfff) + 1 - partitions := int((ebx>>12)&0x3ff) + 1 - associativity := int((ebx>>22)&0x3ff) + 1 - sets := int(ecx) + 1 - size := associativity * partitions * coherency * sets - switch cacheLevel { - case 1: - if cacheType == 1 { - // 1 = Data Cache - c.Cache.L1D = size - } else if cacheType == 2 { - // 2 = Instruction Cache - c.Cache.L1I = size - } else { - if c.Cache.L1D < 0 { - c.Cache.L1I = size - } - if c.Cache.L1I < 0 { - c.Cache.L1I = size - } - } - case 2: - c.Cache.L2 = size - case 3: - c.Cache.L3 = size - } - } - case AMD, Hygon: - // Untested. 
- if maxExtendedFunction() < 0x80000005 { - return - } - _, _, ecx, edx := cpuid(0x80000005) - c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) - c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) - - if maxExtendedFunction() < 0x80000006 { - return - } - _, _, ecx, _ = cpuid(0x80000006) - c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) - - // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties - if maxExtendedFunction() < 0x8000001D { - return - } - for i := uint32(0); i < math.MaxUint32; i++ { - eax, ebx, ecx, _ := cpuidex(0x8000001D, i) - - level := (eax >> 5) & 7 - cacheNumSets := ecx + 1 - cacheLineSize := 1 + (ebx & 2047) - cachePhysPartitions := 1 + ((ebx >> 12) & 511) - cacheNumWays := 1 + ((ebx >> 22) & 511) - - typ := eax & 15 - size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays) - if typ == 0 { - return - } - - switch level { - case 1: - switch typ { - case 1: - // Data cache - c.Cache.L1D = size - case 2: - // Inst cache - c.Cache.L1I = size - default: - if c.Cache.L1D < 0 { - c.Cache.L1I = size - } - if c.Cache.L1I < 0 { - c.Cache.L1I = size - } - } - case 2: - c.Cache.L2 = size - case 3: - c.Cache.L3 = size - } - } - } -} - -type SGXEPCSection struct { - BaseAddress uint64 - EPCSize uint64 -} - -type SGXSupport struct { - Available bool - LaunchControl bool - SGX1Supported bool - SGX2Supported bool - MaxEnclaveSizeNot64 int64 - MaxEnclaveSize64 int64 - EPCSections []SGXEPCSection -} - -func hasSGX(available, lc bool) (rval SGXSupport) { - rval.Available = available - - if !available { - return - } - - rval.LaunchControl = lc - - a, _, _, d := cpuidex(0x12, 0) - rval.SGX1Supported = a&0x01 != 0 - rval.SGX2Supported = a&0x02 != 0 - rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 - rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 - rval.EPCSections = make([]SGXEPCSection, 0) - - for subleaf := uint32(2); subleaf < 2+8; subleaf++ { - eax, ebx, ecx, edx := cpuidex(0x12, subleaf) - leafType := eax & 0xf - - if leafType == 0 { - // Invalid subleaf, stop iterating - break - } else if leafType == 1 { - // EPC Section subleaf - baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32) - size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32) - - section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size} - rval.EPCSections = append(rval.EPCSections, section) - } - } - - return -} - -func support() flagSet { - var fs flagSet - mfi := maxFunctionID() - vend, _ := vendorID() - if mfi < 0x1 { - return fs - } - family, model := familyModel() - - _, _, c, d := cpuid(1) - fs.setIf((d&(1<<0)) != 0, X87) - fs.setIf((d&(1<<8)) != 0, CMPXCHG8) - fs.setIf((d&(1<<11)) != 0, SCE) - fs.setIf((d&(1<<15)) != 0, CMOV) - fs.setIf((d&(1<<22)) != 0, MMXEXT) - fs.setIf((d&(1<<23)) != 0, MMX) - fs.setIf((d&(1<<24)) != 0, FXSR) - fs.setIf((d&(1<<25)) != 0, FXSROPT) - fs.setIf((d&(1<<25)) != 0, SSE) - fs.setIf((d&(1<<26)) != 0, SSE2) - fs.setIf((c&1) != 0, SSE3) - fs.setIf((c&(1<<5)) != 0, VMX) - fs.setIf((c&0x00000200) != 0, SSSE3) - fs.setIf((c&0x00080000) != 0, SSE4) - fs.setIf((c&0x00100000) != 0, SSE42) - fs.setIf((c&(1<<25)) != 0, AESNI) - fs.setIf((c&(1<<1)) != 0, CLMUL) - fs.setIf(c&(1<<22) != 0, MOVBE) - fs.setIf(c&(1<<23) != 0, POPCNT) - fs.setIf(c&(1<<30) != 0, RDRAND) - - // This bit has been reserved by Intel & AMD for use by hypervisors, - // and indicates the presence of a hypervisor. 
- fs.setIf(c&(1<<31) != 0, HYPERVISOR) - fs.setIf(c&(1<<29) != 0, F16C) - fs.setIf(c&(1<<13) != 0, CX16) - - if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { - fs.setIf(threadsPerCore() > 1, HTT) - } - if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 { - fs.setIf(threadsPerCore() > 1, HTT) - } - fs.setIf(c&1<<26 != 0, XSAVE) - fs.setIf(c&1<<27 != 0, OSXSAVE) - // Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits - const avxCheck = 1<<26 | 1<<27 | 1<<28 - if c&avxCheck == avxCheck { - // Check for OS support - eax, _ := xgetbv(0) - if (eax & 0x6) == 0x6 { - fs.set(AVX) - switch vend { - case Intel: - // Older than Haswell. - fs.setIf(family == 6 && model < 60, AVXSLOW) - case AMD: - // Older than Zen 2 - fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW) - } - } - } - // FMA3 can be used with SSE registers, so no OS support is strictly needed. - // fma3 and OSXSAVE needed. - const fma3Check = 1<<12 | 1<<27 - fs.setIf(c&fma3Check == fma3Check, FMA3) - - // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. - if mfi >= 7 { - _, ebx, ecx, edx := cpuidex(7, 0) - eax1, _, _, _ := cpuidex(7, 1) - if fs.inSet(AVX) && (ebx&0x00000020) != 0 { - fs.set(AVX2) - } - // CPUID.(EAX=7, ECX=0).EBX - if (ebx & 0x00000008) != 0 { - fs.set(BMI1) - fs.setIf((ebx&0x00000100) != 0, BMI2) - } - fs.setIf(ebx&(1<<2) != 0, SGX) - fs.setIf(ebx&(1<<4) != 0, HLE) - fs.setIf(ebx&(1<<9) != 0, ERMS) - fs.setIf(ebx&(1<<11) != 0, RTM) - fs.setIf(ebx&(1<<14) != 0, MPX) - fs.setIf(ebx&(1<<18) != 0, RDSEED) - fs.setIf(ebx&(1<<19) != 0, ADX) - fs.setIf(ebx&(1<<29) != 0, SHA) - // CPUID.(EAX=7, ECX=0).ECX - fs.setIf(ecx&(1<<5) != 0, WAITPKG) - fs.setIf(ecx&(1<<7) != 0, CETSS) - fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) - fs.setIf(ecx&(1<<27) != 0, MOVDIRI) - fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) - fs.setIf(ecx&(1<<29) != 0, ENQCMD) - fs.setIf(ecx&(1<<30) != 0, SGXLC) - // CPUID.(EAX=7, ECX=0).EDX - fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) - fs.setIf(edx&(1<<14) != 0, SERIALIZE) - fs.setIf(edx&(1<<16) != 0, TSXLDTRK) - fs.setIf(edx&(1<<20) != 0, CETIBT) - fs.setIf(edx&(1<<26) != 0, IBPB) - fs.setIf(edx&(1<<27) != 0, STIBP) - - // Only detect AVX-512 features if XGETBV is supported - if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { - // Check for OS support - eax, _ := xgetbv(0) - - // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and - // ZMM16-ZMM31 state are enabled by OS) - /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
- hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3 - if runtime.GOOS == "darwin" { - hasAVX512 = fs.inSet(AVX) && darwinHasAVX512() - } - if hasAVX512 { - fs.setIf(ebx&(1<<16) != 0, AVX512F) - fs.setIf(ebx&(1<<17) != 0, AVX512DQ) - fs.setIf(ebx&(1<<21) != 0, AVX512IFMA) - fs.setIf(ebx&(1<<26) != 0, AVX512PF) - fs.setIf(ebx&(1<<27) != 0, AVX512ER) - fs.setIf(ebx&(1<<28) != 0, AVX512CD) - fs.setIf(ebx&(1<<30) != 0, AVX512BW) - fs.setIf(ebx&(1<<31) != 0, AVX512VL) - // ecx - fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) - fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) - fs.setIf(ecx&(1<<8) != 0, GFNI) - fs.setIf(ecx&(1<<9) != 0, VAES) - fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) - fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) - fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) - fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ) - // edx - fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT) - fs.setIf(edx&(1<<22) != 0, AMXBF16) - fs.setIf(edx&(1<<23) != 0, AVX512FP16) - fs.setIf(edx&(1<<24) != 0, AMXTILE) - fs.setIf(edx&(1<<25) != 0, AMXINT8) - // eax1 = CPUID.(EAX=7, ECX=1).EAX - fs.setIf(eax1&(1<<5) != 0, AVX512BF16) - } - } - } - - if maxExtendedFunction() >= 0x80000001 { - _, _, c, d := cpuid(0x80000001) - if (c & (1 << 5)) != 0 { - fs.set(LZCNT) - fs.set(POPCNT) - } - fs.setIf((c&(1<<0)) != 0, LAHF) - fs.setIf((c&(1<<10)) != 0, IBS) - fs.setIf((d&(1<<31)) != 0, AMD3DNOW) - fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT) - fs.setIf((d&(1<<23)) != 0, MMX) - fs.setIf((d&(1<<22)) != 0, MMXEXT) - fs.setIf((c&(1<<6)) != 0, SSE4A) - fs.setIf(d&(1<<20) != 0, NX) - fs.setIf(d&(1<<27) != 0, RDTSCP) - - /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be - * used unless the OS has AVX support. */ - if fs.inSet(AVX) { - fs.setIf((c&0x00000800) != 0, XOP) - fs.setIf((c&0x00010000) != 0, FMA4) - } - - } - if maxExtendedFunction() >= 0x80000007 { - _, b, _, d := cpuid(0x80000007) - fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW) - fs.setIf((b&(1<<1)) != 0, SUCCOR) - fs.setIf((b&(1<<2)) != 0, HWA) - fs.setIf((d&(1<<9)) != 0, CPBOOST) - } - - if maxExtendedFunction() >= 0x80000008 { - _, b, _, _ := cpuid(0x80000008) - fs.setIf((b&(1<<9)) != 0, WBNOINVD) - fs.setIf((b&(1<<8)) != 0, MCOMMIT) - fs.setIf((b&(1<<13)) != 0, INT_WBINVD) - fs.setIf((b&(1<<4)) != 0, RDPRU) - fs.setIf((b&(1<<3)) != 0, INVLPGB) - fs.setIf((b&(1<<1)) != 0, MSRIRC) - fs.setIf((b&(1<<0)) != 0, CLZERO) - } - - if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { - eax, _, _, _ := cpuid(0x8000001b) - fs.setIf((eax>>0)&1 == 1, IBSFFV) - fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM) - fs.setIf((eax>>2)&1 == 1, IBSOPSAM) - fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT) - fs.setIf((eax>>4)&1 == 1, IBSOPCNT) - fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) - fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) - fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) - } - - return fs -} - -func valAsString(values ...uint32) []byte { - r := make([]byte, 4*len(values)) - for i, v := range values { - dst := r[i*4:] - dst[0] = byte(v & 0xff) - dst[1] = byte((v >> 8) & 0xff) - dst[2] = byte((v >> 16) & 0xff) - dst[3] = byte((v >> 24) & 0xff) - switch { - case dst[0] == 0: - return r[:i*4] - case dst[1] == 0: - return r[:i*4+1] - case dst[2] == 0: - return r[:i*4+2] - case dst[3] == 0: - return r[:i*4+3] - } - } - return r -} diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s deleted file mode 100644 index 8587c3a..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. 
See LICENSE file. - -//+build 386,!gccgo,!noasm,!appengine - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORL CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+4(FP) - MOVL BX, ebx+8(FP) - MOVL CX, ecx+12(FP) - MOVL DX, edx+16(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+4(FP) - MOVL DX, edx+8(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET - -// func asmDarwinHasAVX512() bool -TEXT ·asmDarwinHasAVX512(SB), 7, $0 - MOVL $0, eax+0(FP) - RET diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s deleted file mode 100644 index bc11f89..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -//+build amd64,!gccgo,!noasm,!appengine - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORQ CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmXgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+8(FP) - MOVL DX, edx+12(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET - -// From https://go-review.googlesource.com/c/sys/+/285572/ -// func asmDarwinHasAVX512() bool -TEXT ·asmDarwinHasAVX512(SB), 7, $0-1 - MOVB $0, ret+0(FP) // default to false - -#ifdef GOOS_darwin // return if not darwin -#ifdef GOARCH_amd64 // return if not amd64 -// These values from: -// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h -#define commpage64_base_address 0x00007fffffe00000 -#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) -#define commpage64_version (commpage64_base_address+0x01E) -#define hasAVX512F 0x0000004000000000 - MOVQ $commpage64_version, BX - MOVW (BX), AX - CMPW AX, $13 // versions < 13 do not support AVX512 - JL no_avx512 - MOVQ $commpage64_cpu_capabilities64, BX - MOVQ (BX), AX - MOVQ $hasAVX512F, CX - ANDQ CX, AX - JZ no_avx512 - MOVB $1, ret+0(FP) - -no_avx512: -#endif -#endif - RET - diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s deleted file mode 100644 index b31d6ae..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -//+build arm64,!gccgo,!noasm,!appengine - -// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt - -// func getMidr -TEXT ·getMidr(SB), 7, $0 - WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */ - MOVD R0, midr+0(FP) - RET - -// func getProcFeatures -TEXT ·getProcFeatures(SB), 7, $0 - WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */ - MOVD R0, procFeatures+0(FP) - RET - -// func getInstAttributes -TEXT ·getInstAttributes(SB), 7, $0 - WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */ - WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */ - MOVD R0, instAttrReg0+0(FP) - MOVD R1, instAttrReg1+8(FP) - RET - diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go deleted file mode 100644 index 9a53504..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -//go:build arm64 && !gccgo && !noasm && !appengine -// +build arm64,!gccgo,!noasm,!appengine - -package cpuid - -import "runtime" - -func getMidr() (midr uint64) -func getProcFeatures() (procFeatures uint64) -func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) - -func initCPU() { - cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - xgetbv = func(uint32) (a, b uint32) { return 0, 0 } - rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } -} - -func addInfo(c *CPUInfo, safe bool) { - // Seems to be safe to assume on ARM64 - c.CacheLine = 64 - detectOS(c) - - // ARM64 disabled since it may crash if interrupt is not intercepted by OS. 
- if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" { - return - } - midr := getMidr() - - // MIDR_EL1 - Main ID Register - // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1 - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | Implementer | [31-24] | y | - // |--------------------------------------------------| - // | Variant | [23-20] | y | - // |--------------------------------------------------| - // | Architecture | [19-16] | y | - // |--------------------------------------------------| - // | PartNum | [15-4] | y | - // |--------------------------------------------------| - // | Revision | [3-0] | y | - // x--------------------------------------------------x - - switch (midr >> 24) & 0xff { - case 0xC0: - c.VendorString = "Ampere Computing" - c.VendorID = Ampere - case 0x41: - c.VendorString = "Arm Limited" - c.VendorID = ARM - case 0x42: - c.VendorString = "Broadcom Corporation" - c.VendorID = Broadcom - case 0x43: - c.VendorString = "Cavium Inc" - c.VendorID = Cavium - case 0x44: - c.VendorString = "Digital Equipment Corporation" - c.VendorID = DEC - case 0x46: - c.VendorString = "Fujitsu Ltd" - c.VendorID = Fujitsu - case 0x49: - c.VendorString = "Infineon Technologies AG" - c.VendorID = Infineon - case 0x4D: - c.VendorString = "Motorola or Freescale Semiconductor Inc" - c.VendorID = Motorola - case 0x4E: - c.VendorString = "NVIDIA Corporation" - c.VendorID = NVIDIA - case 0x50: - c.VendorString = "Applied Micro Circuits Corporation" - c.VendorID = AMCC - case 0x51: - c.VendorString = "Qualcomm Inc" - c.VendorID = Qualcomm - case 0x56: - c.VendorString = "Marvell International Ltd" - c.VendorID = Marvell - case 0x69: - c.VendorString = "Intel Corporation" - c.VendorID = Intel - } - - // Lower 4 bits: Architecture - // Architecture Meaning - // 0b0001 Armv4. - // 0b0010 Armv4T. - // 0b0011 Armv5 (obsolete). - // 0b0100 Armv5T. - // 0b0101 Armv5TE. - // 0b0110 Armv5TEJ. - // 0b0111 Armv6. - // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'. - // Upper 4 bit: Variant - // An IMPLEMENTATION DEFINED variant number. - // Typically, this field is used to distinguish between different product variants, or major revisions of a product. - c.Family = int(midr>>16) & 0xff - - // PartNum, bits [15:4] - // An IMPLEMENTATION DEFINED primary part number for the device. - // On processors implemented by Arm, if the top four bits of the primary - // part number are 0x0 or 0x7, the variant and architecture are encoded differently. - // Revision, bits [3:0] - // An IMPLEMENTATION DEFINED revision number for the device. 
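
The MIDR_EL1 field table above maps directly to shift-and-mask extraction. Below is a minimal decoding sketch with hypothetical helper names and an illustrative sample value; note that the vendored code itself keeps only the implementer switch plus `Family` = bits [23:16] and `Model` = bits [15:0]:

```
package main

import "fmt"

// midrFields decodes the MIDR_EL1 layout shown in the table above:
// Implementer [31:24], Variant [23:20], Architecture [19:16],
// PartNum [15:4], Revision [3:0].
func midrFields(midr uint64) (implementer, variant, arch, partNum, revision uint64) {
	implementer = (midr >> 24) & 0xff
	variant = (midr >> 20) & 0xf
	arch = (midr >> 16) & 0xf
	partNum = (midr >> 4) & 0xfff
	revision = midr & 0xf
	return
}

func main() {
	// 0x410FD083 is a commonly cited Cortex-A72 (r0p3) MIDR value, used here
	// only as an illustration: implementer 0x41 = Arm Limited, part 0xD08.
	imp, variant, arch, part, rev := midrFields(0x410FD083)
	fmt.Printf("imp=%#x variant=%#x arch=%#x part=%#x rev=%#x\n",
		imp, variant, arch, part, rev)
}
```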
- c.Model = int(midr) & 0xffff - - procFeatures := getProcFeatures() - - // ID_AA64PFR0_EL1 - Processor Feature Register 0 - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | DIT | [51-48] | y | - // |--------------------------------------------------| - // | SVE | [35-32] | y | - // |--------------------------------------------------| - // | GIC | [27-24] | n | - // |--------------------------------------------------| - // | AdvSIMD | [23-20] | y | - // |--------------------------------------------------| - // | FP | [19-16] | y | - // |--------------------------------------------------| - // | EL3 | [15-12] | n | - // |--------------------------------------------------| - // | EL2 | [11-8] | n | - // |--------------------------------------------------| - // | EL1 | [7-4] | n | - // |--------------------------------------------------| - // | EL0 | [3-0] | n | - // x--------------------------------------------------x - - var f flagSet - // if procFeatures&(0xf<<48) != 0 { - // fmt.Println("DIT") - // } - f.setIf(procFeatures&(0xf<<32) != 0, SVE) - if procFeatures&(0xf<<20) != 15<<20 { - f.set(ASIMD) - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 - // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. - f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP) - } - f.setIf(procFeatures&(0xf<<16) != 0, FP) - - instAttrReg0, instAttrReg1 := getInstAttributes() - - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 - // - // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | TS | [55-52] | y | - // |--------------------------------------------------| - // | FHM | [51-48] | y | - // |--------------------------------------------------| - // | DP | [47-44] | y | - // |--------------------------------------------------| - // | SM4 | [43-40] | y | - // |--------------------------------------------------| - // | SM3 | [39-36] | y | - // |--------------------------------------------------| - // | SHA3 | [35-32] | y | - // |--------------------------------------------------| - // | RDM | [31-28] | y | - // |--------------------------------------------------| - // | ATOMICS | [23-20] | y | - // |--------------------------------------------------| - // | CRC32 | [19-16] | y | - // |--------------------------------------------------| - // | SHA2 | [15-12] | y | - // |--------------------------------------------------| - // | SHA1 | [11-8] | y | - // |--------------------------------------------------| - // | AES | [7-4] | y | - // x--------------------------------------------------x - - // if instAttrReg0&(0xf<<52) != 0 { - // fmt.Println("TS") - // } - // if instAttrReg0&(0xf<<48) != 0 { - // fmt.Println("FHM") - // } - f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) - f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) - f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) - f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3) - f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM) - f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS) - f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32) - f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2) - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 - // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, 
and SHA512SU1 instructions implemented. - f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512) - f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1) - f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM) - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 - // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities. - f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL) - - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1 - // - // ID_AA64ISAR1_EL1 - Instruction set attribute register 1 - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | GPI | [31-28] | y | - // |--------------------------------------------------| - // | GPA | [27-24] | y | - // |--------------------------------------------------| - // | LRCPC | [23-20] | y | - // |--------------------------------------------------| - // | FCMA | [19-16] | y | - // |--------------------------------------------------| - // | JSCVT | [15-12] | y | - // |--------------------------------------------------| - // | API | [11-8] | y | - // |--------------------------------------------------| - // | APA | [7-4] | y | - // |--------------------------------------------------| - // | DPB | [3-0] | y | - // x--------------------------------------------------x - - // if instAttrReg1&(0xf<<28) != 0 { - // fmt.Println("GPI") - // } - f.setIf(instAttrReg1&(0xf<<28) != 24, GPA) - f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC) - f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA) - f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT) - // if instAttrReg1&(0xf<<8) != 0 { - // fmt.Println("API") - // } - // if instAttrReg1&(0xf<<4) != 0 { - // fmt.Println("APA") - // } - f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP) - - // Store - c.featureSet.or(f) -} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go deleted file mode 100644 index 9636c2b..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine -// +build !amd64,!386,!arm64 gccgo noasm appengine - -package cpuid - -func initCPU() { - cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - xgetbv = func(uint32) (a, b uint32) { return 0, 0 } - rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } -} - -func addInfo(info *CPUInfo, safe bool) {} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go deleted file mode 100644 index 35678d8..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine) -// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine - -package cpuid - -func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func asmXgetbv(index uint32) (eax, edx uint32) -func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -func asmDarwinHasAVX512() bool - -func initCPU() { - cpuid = asmCpuid - cpuidex = asmCpuidex - xgetbv = asmXgetbv - rdtscpAsm = asmRdtscpAsm - darwinHasAVX512 = asmDarwinHasAVX512 -} - -func addInfo(c *CPUInfo, safe bool) { - c.maxFunc = maxFunctionID() - c.maxExFunc = maxExtendedFunction() - c.BrandName = brandName() - c.CacheLine = cacheLine() - c.Family, c.Model = familyModel() - c.featureSet = support() - c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) - c.ThreadsPerCore = threadsPerCore() - c.LogicalCores = logicalCores() - c.PhysicalCores = physicalCores() - c.VendorID, c.VendorString = vendorID() - c.cacheSize() - c.frequencies() -} diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go deleted file mode 100644 index 02fe232..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ /dev/null @@ -1,196 +0,0 @@ -// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT. - -package cpuid - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ADX-1] - _ = x[AESNI-2] - _ = x[AMD3DNOW-3] - _ = x[AMD3DNOWEXT-4] - _ = x[AMXBF16-5] - _ = x[AMXINT8-6] - _ = x[AMXTILE-7] - _ = x[AVX-8] - _ = x[AVX2-9] - _ = x[AVX512BF16-10] - _ = x[AVX512BITALG-11] - _ = x[AVX512BW-12] - _ = x[AVX512CD-13] - _ = x[AVX512DQ-14] - _ = x[AVX512ER-15] - _ = x[AVX512F-16] - _ = x[AVX512FP16-17] - _ = x[AVX512IFMA-18] - _ = x[AVX512PF-19] - _ = x[AVX512VBMI-20] - _ = x[AVX512VBMI2-21] - _ = x[AVX512VL-22] - _ = x[AVX512VNNI-23] - _ = x[AVX512VP2INTERSECT-24] - _ = x[AVX512VPOPCNTDQ-25] - _ = x[AVXSLOW-26] - _ = x[BMI1-27] - _ = x[BMI2-28] - _ = x[CETIBT-29] - _ = x[CETSS-30] - _ = x[CLDEMOTE-31] - _ = x[CLMUL-32] - _ = x[CLZERO-33] - _ = x[CMOV-34] - _ = x[CMPXCHG8-35] - _ = x[CPBOOST-36] - _ = x[CX16-37] - _ = x[ENQCMD-38] - _ = x[ERMS-39] - _ = x[F16C-40] - _ = x[FMA3-41] - _ = x[FMA4-42] - _ = x[FXSR-43] - _ = x[FXSROPT-44] - _ = x[GFNI-45] - _ = x[HLE-46] - _ = x[HTT-47] - _ = x[HWA-48] - _ = x[HYPERVISOR-49] - _ = x[IBPB-50] - _ = x[IBS-51] - _ = x[IBSBRNTRGT-52] - _ = x[IBSFETCHSAM-53] - _ = x[IBSFFV-54] - _ = x[IBSOPCNT-55] - _ = x[IBSOPCNTEXT-56] - _ = x[IBSOPSAM-57] - _ = x[IBSRDWROPCNT-58] - _ = x[IBSRIPINVALIDCHK-59] - _ = x[INT_WBINVD-60] - _ = x[INVLPGB-61] - _ = x[LAHF-62] - _ = x[LZCNT-63] - _ = x[MCAOVERFLOW-64] - _ = x[MCOMMIT-65] - _ = x[MMX-66] - _ = x[MMXEXT-67] - _ = x[MOVBE-68] - _ = x[MOVDIR64B-69] - _ = x[MOVDIRI-70] - _ = x[MPX-71] - _ = x[MSRIRC-72] - _ = x[NX-73] - _ = x[OSXSAVE-74] - _ = x[POPCNT-75] - _ = x[RDPRU-76] - _ = x[RDRAND-77] - _ = x[RDSEED-78] - _ = x[RDTSCP-79] - _ = x[RTM-80] - _ = x[RTM_ALWAYS_ABORT-81] - _ = x[SCE-82] - _ = x[SERIALIZE-83] - _ = x[SGX-84] - _ = x[SGXLC-85] - _ = x[SHA-86] - _ = x[SSE-87] - _ = x[SSE2-88] - _ = x[SSE3-89] - _ = x[SSE4-90] - _ = x[SSE42-91] - _ = x[SSE4A-92] - _ = x[SSSE3-93] - _ = x[STIBP-94] - _ = x[SUCCOR-95] - _ = x[TBM-96] - _ = x[TSXLDTRK-97] - _ = x[VAES-98] - _ = 
x[VMX-99] - _ = x[VPCLMULQDQ-100] - _ = x[WAITPKG-101] - _ = x[WBNOINVD-102] - _ = x[X87-103] - _ = x[XOP-104] - _ = x[XSAVE-105] - _ = x[AESARM-106] - _ = x[ARMCPUID-107] - _ = x[ASIMD-108] - _ = x[ASIMDDP-109] - _ = x[ASIMDHP-110] - _ = x[ASIMDRDM-111] - _ = x[ATOMICS-112] - _ = x[CRC32-113] - _ = x[DCPOP-114] - _ = x[EVTSTRM-115] - _ = x[FCMA-116] - _ = x[FP-117] - _ = x[FPHP-118] - _ = x[GPA-119] - _ = x[JSCVT-120] - _ = x[LRCPC-121] - _ = x[PMULL-122] - _ = x[SHA1-123] - _ = x[SHA2-124] - _ = x[SHA3-125] - _ = x[SHA512-126] - _ = x[SM3-127] - _ = x[SM4-128] - _ = x[SVE-129] - _ = x[lastID-130] - _ = x[firstID-0] -} - -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLAHFLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMPXMSRIRCNXOSXSAVEPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDX87XOPXSAVEAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" - -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 244, 249, 257, 262, 268, 272, 280, 287, 291, 297, 301, 305, 309, 313, 317, 324, 328, 331, 334, 337, 347, 351, 354, 364, 375, 381, 389, 400, 408, 420, 436, 446, 453, 457, 462, 473, 480, 483, 489, 494, 503, 510, 513, 519, 521, 528, 534, 539, 545, 551, 557, 560, 576, 579, 588, 591, 596, 599, 602, 606, 610, 614, 619, 624, 629, 634, 640, 643, 651, 655, 658, 668, 675, 683, 686, 689, 694, 700, 708, 713, 720, 727, 735, 742, 747, 752, 759, 763, 765, 769, 772, 777, 782, 787, 791, 795, 799, 805, 808, 811, 814, 820} - -func (i FeatureID) String() string { - if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { - return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
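
The generated String methods above give printable names for FeatureID and Vendor values. A minimal usage sketch follows; it assumes the package's exported, pre-populated `CPU` variable, which is declared elsewhere in the package and is not visible in this diff:

```
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// FeatureID and Vendor values print their names via the generated
	// stringers shown above.
	fmt.Println(cpuid.AVX2.String(), cpuid.Intel.String())

	// CPU is assumed to be the package's detected CPUInfo; Supports and the
	// fields below appear in the detection code elsewhere in this diff.
	fmt.Println("brand:", cpuid.CPU.BrandName)
	fmt.Println("cores:", cpuid.CPU.PhysicalCores,
		"threads/core:", cpuid.CPU.ThreadsPerCore)
	fmt.Println("has AVX2:", cpuid.CPU.Supports(cpuid.AVX2))
}
```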
- var x [1]struct{} - _ = x[VendorUnknown-0] - _ = x[Intel-1] - _ = x[AMD-2] - _ = x[VIA-3] - _ = x[Transmeta-4] - _ = x[NSC-5] - _ = x[KVM-6] - _ = x[MSVM-7] - _ = x[VMware-8] - _ = x[XenHVM-9] - _ = x[Bhyve-10] - _ = x[Hygon-11] - _ = x[SiS-12] - _ = x[RDC-13] - _ = x[Ampere-14] - _ = x[ARM-15] - _ = x[Broadcom-16] - _ = x[Cavium-17] - _ = x[DEC-18] - _ = x[Fujitsu-19] - _ = x[Infineon-20] - _ = x[Motorola-21] - _ = x[NVIDIA-22] - _ = x[AMCC-23] - _ = x[Qualcomm-24] - _ = x[Marvell-25] - _ = x[lastVendor-26] -} - -const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor" - -var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155} - -func (i Vendor) String() string { - if i < 0 || i >= Vendor(len(_Vendor_index)-1) { - return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]] -} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go deleted file mode 100644 index 8d2cb03..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. - -package cpuid - -import "runtime" - -func detectOS(c *CPUInfo) bool { - // There are no hw.optional sysctl values for the below features on Mac OS 11.0 - // to detect their supported state dynamically. Assume the CPU features that - // Apple Silicon M1 supports to be available as a minimal set of features - // to all Go programs running on darwin/arm64. - // TODO: Add more if we know them. - c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) - c.PhysicalCores = runtime.NumCPU() - // For now assuming 1 thread per core... - c.ThreadsPerCore = 1 - c.LogicalCores = c.PhysicalCores - return true -} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go deleted file mode 100644 index ee278b9..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. - -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file located -// here https://github.com/golang/sys/blob/master/LICENSE - -package cpuid - -import ( - "encoding/binary" - "io/ioutil" - "runtime" -) - -// HWCAP bits. -const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 -) - -func detectOS(c *CPUInfo) bool { - // For now assuming no hyperthreading is reasonable. - c.LogicalCores = runtime.NumCPU() - c.PhysicalCores = c.LogicalCores - c.ThreadsPerCore = 1 - if hwcap == 0 { - // We did not get values from the runtime. 
- // Try reading /proc/self/auxv - - // From https://github.com/golang/sys - const ( - _AT_HWCAP = 16 - _AT_HWCAP2 = 26 - - uintSize = int(32 << (^uint(0) >> 63)) - ) - - buf, err := ioutil.ReadFile("/proc/self/auxv") - if err != nil { - // e.g. on android /proc/self/auxv is not accessible, so silently - // ignore the error and leave Initialized = false. On some - // architectures (e.g. arm64) doinit() implements a fallback - // readout and will set Initialized = true again. - return false - } - bo := binary.LittleEndian - for len(buf) >= 2*(uintSize/8) { - var tag, val uint - switch uintSize { - case 32: - tag = uint(bo.Uint32(buf[0:])) - val = uint(bo.Uint32(buf[4:])) - buf = buf[8:] - case 64: - tag = uint(bo.Uint64(buf[0:])) - val = uint(bo.Uint64(buf[8:])) - buf = buf[16:] - } - switch tag { - case _AT_HWCAP: - hwcap = val - case _AT_HWCAP2: - // Not used - } - } - if hwcap == 0 { - return false - } - } - - // HWCap was populated by the runtime from the auxiliary vector. - // Use HWCap information since reading aarch64 system registers - // is not supported in user space on older linux kernels. - c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM) - c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD) - c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP) - c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP) - c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM) - c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID) - c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32) - c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP) - c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) - c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) - c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) - c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) - c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) - c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) - c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) - c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) - c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) - c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) - c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512) - c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3) - c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4) - c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE) - - // The Samsung S9+ kernel reports support for atomics, but not all cores - // actually support them, resulting in SIGILL. See issue #28431. - // TODO(elias.naur): Only disable the optimization on bad chipsets on android. - c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS) - - return true -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go deleted file mode 100644 index 8733ba3..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. - -//go:build arm64 && !linux && !darwin -// +build arm64,!linux,!darwin - -package cpuid - -import "runtime" - -func detectOS(c *CPUInfo) bool { - c.PhysicalCores = runtime.NumCPU() - // For now assuming 1 thread per core... 
- c.ThreadsPerCore = 1 - c.LogicalCores = c.PhysicalCores - return false -} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go deleted file mode 100644 index f8f201b..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. - -//go:build nounsafe -// +build nounsafe - -package cpuid - -var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go deleted file mode 100644 index 92af622..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. - -//go:build !nounsafe -// +build !nounsafe - -package cpuid - -import _ "unsafe" // needed for go:linkname - -//go:linkname hwcap internal/cpu.HWCap -var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh deleted file mode 100644 index 471d986..0000000 --- a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -e - -go tool dist list | while IFS=/ read os arch; do - echo "Checking $os/$arch..." - echo " normal" - GOARCH=$arch GOOS=$os go build -o /dev/null . - echo " noasm" - GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null . - echo " appengine" - GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null . - echo " noasm,appengine" - GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null . -done diff --git a/vendor/github.com/minio/blake2b-simd/.gitignore b/vendor/github.com/minio/blake2b-simd/.gitignore deleted file mode 100644 index c56069f..0000000 --- a/vendor/github.com/minio/blake2b-simd/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.test \ No newline at end of file diff --git a/vendor/github.com/minio/blake2b-simd/.travis.yml b/vendor/github.com/minio/blake2b-simd/.travis.yml deleted file mode 100644 index 545066e..0000000 --- a/vendor/github.com/minio/blake2b-simd/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: required -dist: trusty -language: go - -os: -- linux -- osx - -osx_image: xcode7.2 - -go: -- 1.6 -- 1.5 - -env: -- ARCH=x86_64 -- ARCH=i686 - -script: -- diff -au <(gofmt -d .) <(printf "") -- go test -race -v ./... diff --git a/vendor/github.com/minio/blake2b-simd/LICENSE b/vendor/github.com/minio/blake2b-simd/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/minio/blake2b-simd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/minio/blake2b-simd/README.md b/vendor/github.com/minio/blake2b-simd/README.md deleted file mode 100644 index 31fcbf7..0000000 --- a/vendor/github.com/minio/blake2b-simd/README.md +++ /dev/null @@ -1,144 +0,0 @@ -BLAKE2b-SIMD -============ - -Pure Go implementation of BLAKE2b using SIMD optimizations. - -Introduction ------------- - -This package was initially based on the pure go [BLAKE2b](https://github.com/dchest/blake2b) implementation of Dmitry Chestnykh and merged with the (`cgo` dependent) AVX optimized [BLAKE2](https://github.com/codahale/blake2) implementation (which in turn is based on the [official implementation](https://github.com/BLAKE2/BLAKE2). It does so by using [Go's Assembler](https://golang.org/doc/asm) for amd64 architectures with a golang only fallback for other architectures. - -In addition to AVX there is also support for AVX2 as well as SSE. Best performance is obtained with AVX2 which gives roughly a **4X** performance increase approaching hashing speeds of **1GB/sec** on a single core. - -Benchmarks ----------- - -This is a summary of the performance improvements. Full details are shown below. - -| Technology | 128K | -| ---------- |:-----:| -| AVX2 | 3.94x | -| AVX | 3.28x | -| SSE | 2.85x | - -asm2plan9s ----------- - -In order to be able to work more easily with AVX2/AVX instructions, a separate tool was developed to convert AVX2/AVX instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. - -bt2sum ------- - -[bt2sum](https://github.com/s3git/bt2sum) is a utility that takes advantages of the BLAKE2b SIMD optimizations to compute check sums using the BLAKE2 Tree hashing mode in so called 'unlimited fanout' mode. - -Technical details ------------------ - -BLAKE2b is a hashing algorithm that operates on 64-bit integer values. The AVX2 version uses the 256-bit wide YMM registers in order to essentially process four operations in parallel. AVX and SSE operate on 128-bit values simultaneously (two operations in parallel). Below are excerpts from `compressAvx2_amd64.s`, `compressAvx_amd64.s`, and `compress_generic.go` respectively. - -``` - VPADDQ YMM0,YMM0,YMM1 /* v0 += v4, v1 += v5, v2 += v6, v3 += v7 */ -``` - -``` - VPADDQ XMM0,XMM0,XMM2 /* v0 += v4, v1 += v5 */ - VPADDQ XMM1,XMM1,XMM3 /* v2 += v6, v3 += v7 */ -``` - -``` - v0 += v4 - v1 += v5 - v2 += v6 - v3 += v7 -``` - -Detailed benchmarks -------------------- - -Example performance metrics were generated on Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz - 6 physical cores, 12 logical cores running Ubuntu GNU/Linux with kernel version 4.4.0-24-generic (vanilla with no optimizations). 
- -### AVX2 - -``` -$ benchcmp go.txt avx2.txt -benchmark old ns/op new ns/op delta -BenchmarkHash64-12 1481 849 -42.67% -BenchmarkHash128-12 1428 746 -47.76% -BenchmarkHash1K-12 6379 2227 -65.09% -BenchmarkHash8K-12 37219 11714 -68.53% -BenchmarkHash32K-12 140716 35935 -74.46% -BenchmarkHash128K-12 561656 142634 -74.60% - -benchmark old MB/s new MB/s speedup -BenchmarkHash64-12 43.20 75.37 1.74x -BenchmarkHash128-12 89.64 171.35 1.91x -BenchmarkHash1K-12 160.52 459.69 2.86x -BenchmarkHash8K-12 220.10 699.32 3.18x -BenchmarkHash32K-12 232.87 911.85 3.92x -BenchmarkHash128K-12 233.37 918.93 3.94x -``` - -### AVX2: Comparison to other hashing techniques - -``` -$ go test -bench=Comparison -BenchmarkComparisonMD5-12 1000 1726121 ns/op 607.48 MB/s -BenchmarkComparisonSHA1-12 500 2005164 ns/op 522.94 MB/s -BenchmarkComparisonSHA256-12 300 5531036 ns/op 189.58 MB/s -BenchmarkComparisonSHA512-12 500 3423030 ns/op 306.33 MB/s -BenchmarkComparisonBlake2B-12 1000 1232690 ns/op 850.64 MB/s -``` - -Benchmarks below were generated on a MacBook Pro with a 2.7 GHz Intel Core i7. - -### AVX - -``` -$ benchcmp go.txt avx.txt -benchmark old ns/op new ns/op delta -BenchmarkHash64-8 813 458 -43.67% -BenchmarkHash128-8 766 401 -47.65% -BenchmarkHash1K-8 4881 1763 -63.88% -BenchmarkHash8K-8 36127 12273 -66.03% -BenchmarkHash32K-8 140582 43155 -69.30% -BenchmarkHash128K-8 567850 173246 -69.49% - -benchmark old MB/s new MB/s speedup -BenchmarkHash64-8 78.63 139.57 1.78x -BenchmarkHash128-8 166.98 318.73 1.91x -BenchmarkHash1K-8 209.76 580.68 2.77x -BenchmarkHash8K-8 226.76 667.46 2.94x -BenchmarkHash32K-8 233.09 759.29 3.26x -BenchmarkHash128K-8 230.82 756.56 3.28x -``` - -### SSE - -``` -$ benchcmp go.txt sse.txt -benchmark old ns/op new ns/op delta -BenchmarkHash64-8 813 478 -41.21% -BenchmarkHash128-8 766 411 -46.34% -BenchmarkHash1K-8 4881 1870 -61.69% -BenchmarkHash8K-8 36127 12427 -65.60% -BenchmarkHash32K-8 140582 49512 -64.78% -BenchmarkHash128K-8 567850 199040 -64.95% - -benchmark old MB/s new MB/s speedup -BenchmarkHash64-8 78.63 133.78 1.70x -BenchmarkHash128-8 166.98 311.23 1.86x -BenchmarkHash1K-8 209.76 547.37 2.61x -BenchmarkHash8K-8 226.76 659.20 2.91x -BenchmarkHash32K-8 233.09 661.81 2.84x -BenchmarkHash128K-8 230.82 658.52 2.85x -``` - -License -------- - -Released under the Apache License v2.0. You can find the complete text in the file LICENSE. - -Contributing ------------- - -Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/blake2b-simd/appveyor.yml b/vendor/github.com/minio/blake2b-simd/appveyor.yml deleted file mode 100644 index 77595fe..0000000 --- a/vendor/github.com/minio/blake2b-simd/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Platform. -platform: x64 - -clone_folder: c:\gopath\src\github.com\minio\blake2b-simd - -# environment variables -environment: - GOPATH: c:\gopath - GO15VENDOREXPERIMENT: 1 - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - -# to run your custom scripts instead of automatic MSBuild -build_script: - - go test . - - go test -race . 
- -# to disable automatic tests -test: off - -# to disable deployment -deploy: off diff --git a/vendor/github.com/minio/blake2b-simd/blake2b.go b/vendor/github.com/minio/blake2b-simd/blake2b.go deleted file mode 100644 index 538466a..0000000 --- a/vendor/github.com/minio/blake2b-simd/blake2b.go +++ /dev/null @@ -1,301 +0,0 @@ -// Written in 2012 by Dmitry Chestnykh. -// -// To the extent possible under law, the author have dedicated all copyright -// and related and neighboring rights to this software to the public domain -// worldwide. This software is distributed without any warranty. -// http://creativecommons.org/publicdomain/zero/1.0/ - -// Package blake2b implements BLAKE2b cryptographic hash function. -package blake2b - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - BlockSize = 128 // block size of algorithm - Size = 64 // maximum digest size - SaltSize = 16 // maximum salt size - PersonSize = 16 // maximum personalization string size - KeySize = 64 // maximum size of key -) - -type digest struct { - h [8]uint64 // current chain value - t [2]uint64 // message bytes counter - f [2]uint64 // finalization flags - x [BlockSize]byte // buffer for data not yet compressed - nx int // number of bytes in buffer - - ih [8]uint64 // initial chain value (after config) - paddedKey [BlockSize]byte // copy of key, padded with zeros - isKeyed bool // indicates whether hash was keyed - size uint8 // digest size in bytes - isLastNode bool // indicates processing of the last node in tree hashing -} - -// Initialization values. -var iv = [8]uint64{ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, - 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, - 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, -} - -// Config is used to configure hash function parameters and keying. -// All parameters are optional. -type Config struct { - Size uint8 // digest size (if zero, default size of 64 bytes is used) - Key []byte // key for prefix-MAC - Salt []byte // salt (if < 16 bytes, padded with zeros) - Person []byte // personalization (if < 16 bytes, padded with zeros) - Tree *Tree // parameters for tree hashing -} - -// Tree represents parameters for tree hashing. -type Tree struct { - Fanout uint8 // fanout - MaxDepth uint8 // maximal depth - LeafSize uint32 // leaf maximal byte length (0 for unlimited) - NodeOffset uint64 // node offset (0 for first, leftmost or leaf) - NodeDepth uint8 // node depth (0 for leaves) - InnerHashSize uint8 // inner hash byte length - IsLastNode bool // indicates processing of the last node of layer -} - -var ( - defaultConfig = &Config{Size: Size} - config256 = &Config{Size: 32} -) - -func verifyConfig(c *Config) error { - if c.Size > Size { - return errors.New("digest size is too large") - } - if len(c.Key) > KeySize { - return errors.New("key is too large") - } - if len(c.Salt) > SaltSize { - // Smaller salt is okay: it will be padded with zeros. - return errors.New("salt is too large") - } - if len(c.Person) > PersonSize { - // Smaller personalization is okay: it will be padded with zeros. - return errors.New("personalization is too large") - } - if c.Tree != nil { - if c.Tree.Fanout == 1 { - return errors.New("fanout of 1 is not allowed in tree mode") - } - if c.Tree.MaxDepth < 2 { - return errors.New("incorrect tree depth") - } - if c.Tree.InnerHashSize < 1 || c.Tree.InnerHashSize > Size { - return errors.New("incorrect tree inner hash size") - } - } - return nil -} - -// New returns a new hash.Hash configured with the given Config. 
-// Config can be nil, in which case the default one is used, calculating 64-byte digest. -// Returns non-nil error if Config contains invalid parameters. -func New(c *Config) (hash.Hash, error) { - if c == nil { - c = defaultConfig - } else { - if c.Size == 0 { - // Set default size if it's zero. - c.Size = Size - } - if err := verifyConfig(c); err != nil { - return nil, err - } - } - d := new(digest) - d.initialize(c) - return d, nil -} - -// initialize initializes digest with the given -// config, which must be non-nil and verified. -func (d *digest) initialize(c *Config) { - // Create parameter block. - var p [BlockSize]byte - p[0] = c.Size - p[1] = uint8(len(c.Key)) - if c.Salt != nil { - copy(p[32:], c.Salt) - } - if c.Person != nil { - copy(p[48:], c.Person) - } - if c.Tree != nil { - p[2] = c.Tree.Fanout - p[3] = c.Tree.MaxDepth - binary.LittleEndian.PutUint32(p[4:], c.Tree.LeafSize) - binary.LittleEndian.PutUint64(p[8:], c.Tree.NodeOffset) - p[16] = c.Tree.NodeDepth - p[17] = c.Tree.InnerHashSize - } else { - p[2] = 1 - p[3] = 1 - } - - // Initialize. - d.size = c.Size - for i := 0; i < 8; i++ { - d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(p[i*8:]) - } - if c.Tree != nil && c.Tree.IsLastNode { - d.isLastNode = true - } - - // Process key. - if c.Key != nil { - copy(d.paddedKey[:], c.Key) - d.Write(d.paddedKey[:]) - d.isKeyed = true - } - // Save a copy of initialized state. - copy(d.ih[:], d.h[:]) -} - -// New512 returns a new hash.Hash computing the BLAKE2b 64-byte checksum. -func New512() hash.Hash { - d := new(digest) - d.initialize(defaultConfig) - return d -} - -// New256 returns a new hash.Hash computing the BLAKE2b 32-byte checksum. -func New256() hash.Hash { - d := new(digest) - d.initialize(config256) - return d -} - -// NewMAC returns a new hash.Hash computing BLAKE2b prefix- -// Message Authentication Code of the given size in bytes -// (up to 64) with the given key (up to 64 bytes in length). -func NewMAC(outBytes uint8, key []byte) hash.Hash { - d, err := New(&Config{Size: outBytes, Key: key}) - if err != nil { - panic(err.Error()) - } - return d -} - -// Reset resets the state of digest to the initial state -// after configuration and keying. -func (d *digest) Reset() { - copy(d.h[:], d.ih[:]) - d.t[0] = 0 - d.t[1] = 0 - d.f[0] = 0 - d.f[1] = 0 - d.nx = 0 - if d.isKeyed { - d.Write(d.paddedKey[:]) - } -} - -// Size returns the digest size in bytes. -func (d *digest) Size() int { return int(d.size) } - -// BlockSize returns the algorithm block size in bytes. -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - left := BlockSize - d.nx - if len(p) > left { - // Process buffer. - copy(d.x[d.nx:], p[:left]) - p = p[left:] - compress(d, d.x[:]) - d.nx = 0 - } - // Process full blocks except for the last one. - if len(p) > BlockSize { - n := len(p) &^ (BlockSize - 1) - if n == len(p) { - n -= BlockSize - } - compress(d, p[:n]) - p = p[n:] - } - // Fill buffer. - d.nx += copy(d.x[d.nx:], p) - return -} - -// Sum returns the calculated checksum. -func (d *digest) Sum(in []byte) []byte { - // Make a copy of d so that caller can keep writing and summing. - d0 := *d - hash := d0.checkSum() - return append(in, hash[:d0.size]...) -} - -func (d *digest) checkSum() [Size]byte { - // Do not create unnecessary copies of the key. 
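
At this point the hunk has shown the package's public surface: `New` with `Config`, `New512`, `New256`, `NewMAC`, and the `hash.Hash` methods `Write` and `Sum`. A minimal usage sketch of that API, with error handling kept short:

```
package main

import (
	"fmt"

	blake2b "github.com/minio/blake2b-simd"
)

func main() {
	data := []byte("hello world")

	// Plain 64-byte digest via the hash.Hash returned by New512.
	h := blake2b.New512()
	h.Write(data)
	fmt.Printf("blake2b-512: %x\n", h.Sum(nil))

	// Keyed prefix-MAC with 32 bytes of output, as provided by NewMAC above.
	mac := blake2b.NewMAC(32, []byte("secret key"))
	mac.Write(data)
	fmt.Printf("mac: %x\n", mac.Sum(nil))

	// Equivalent configuration through New and Config.
	h2, err := blake2b.New(&blake2b.Config{Size: 32, Key: []byte("secret key")})
	if err != nil {
		panic(err)
	}
	h2.Write(data)
	fmt.Printf("mac (Config): %x\n", h2.Sum(nil))
}
```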
- if d.isKeyed { - for i := 0; i < len(d.paddedKey); i++ { - d.paddedKey[i] = 0 - } - } - - dec := BlockSize - uint64(d.nx) - if d.t[0] < dec { - d.t[1]-- - } - d.t[0] -= dec - - // Pad buffer with zeros. - for i := d.nx; i < len(d.x); i++ { - d.x[i] = 0 - } - // Set last block flag. - d.f[0] = 0xffffffffffffffff - if d.isLastNode { - d.f[1] = 0xffffffffffffffff - } - // Compress last block. - compress(d, d.x[:]) - - var out [Size]byte - j := 0 - for _, s := range d.h[:(d.size-1)/8+1] { - out[j+0] = byte(s >> 0) - out[j+1] = byte(s >> 8) - out[j+2] = byte(s >> 16) - out[j+3] = byte(s >> 24) - out[j+4] = byte(s >> 32) - out[j+5] = byte(s >> 40) - out[j+6] = byte(s >> 48) - out[j+7] = byte(s >> 56) - j += 8 - } - return out -} - -// Sum512 returns a 64-byte BLAKE2b hash of data. -func Sum512(data []byte) [64]byte { - var d digest - d.initialize(defaultConfig) - d.Write(data) - return d.checkSum() -} - -// Sum256 returns a 32-byte BLAKE2b hash of data. -func Sum256(data []byte) (out [32]byte) { - var d digest - d.initialize(config256) - d.Write(data) - sum := d.checkSum() - copy(out[:], sum[:32]) - return -} diff --git a/vendor/github.com/minio/blake2b-simd/compress_generic.go b/vendor/github.com/minio/blake2b-simd/compress_generic.go deleted file mode 100644 index 61c5b1d..0000000 --- a/vendor/github.com/minio/blake2b-simd/compress_generic.go +++ /dev/null @@ -1,1419 +0,0 @@ -// Written in 2012 by Dmitry Chestnykh. -// -// To the extent possible under law, the author have dedicated all copyright -// and related and neighboring rights to this software to the public domain -// worldwide. This software is distributed without any warranty. -// http://creativecommons.org/publicdomain/zero/1.0/ - -package blake2b - -func compress(d *digest, p []uint8) { - h0, h1, h2, h3, h4, h5, h6, h7 := d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] - - for len(p) >= BlockSize { - // Increment counter. - d.t[0] += BlockSize - if d.t[0] < BlockSize { - d.t[1]++ - } - // Initialize compression function. - v0, v1, v2, v3, v4, v5, v6, v7 := h0, h1, h2, h3, h4, h5, h6, h7 - v8 := iv[0] - v9 := iv[1] - v10 := iv[2] - v11 := iv[3] - v12 := iv[4] ^ d.t[0] - v13 := iv[5] ^ d.t[1] - v14 := iv[6] ^ d.f[0] - v15 := iv[7] ^ d.f[1] - - j := 0 - var m [16]uint64 - for i := range m { - m[i] = uint64(p[j]) | uint64(p[j+1])<<8 | uint64(p[j+2])<<16 | - uint64(p[j+3])<<24 | uint64(p[j+4])<<32 | uint64(p[j+5])<<40 | - uint64(p[j+6])<<48 | uint64(p[j+7])<<56 - j += 8 - } - - // Round 1. 
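
Each numbered round below repeats one quarter-round mixing pattern eight times with a different message-word schedule; the vendored code unrolls it fully for speed. As a reading aid only, here is a compact sketch of that pattern (not part of the package):

```
package main

import "fmt"

// g is a compact form of the mixing step unrolled in each round below:
// two message words x and y are folded into the working values, with
// right-rotations by 32, 24, 16 and 63 bits between the additions and XORs.
func g(a, b, c, d, x, y uint64) (uint64, uint64, uint64, uint64) {
	a += x
	a += b
	d ^= a
	d = d<<(64-32) | d>>32
	c += d
	b ^= c
	b = b<<(64-24) | b>>24

	a += y
	a += b
	d ^= a
	d = d<<(64-16) | d>>16
	c += d
	b ^= c
	b = b<<(64-63) | b>>63
	return a, b, c, d
}

func main() {
	// Arbitrary inputs, only to exercise the helper.
	a, b, c, d := g(1, 2, 3, 4, 5, 6)
	fmt.Println(a, b, c, d)
}
```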
- v0 += m[0] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[2] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[4] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[5] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[7] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[3] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[1] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[8] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[10] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[12] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[14] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[13] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[15] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[11] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[9] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 2. 
- v0 += m[14] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[4] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[9] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[13] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[15] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[8] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[10] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[1] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[0] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[11] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[5] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[7] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[3] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[2] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[12] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 3. 
- v0 += m[11] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[12] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[5] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[15] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[2] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[13] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[0] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[8] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[10] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[3] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[7] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[9] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[1] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[4] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[6] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[14] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 4. 
- v0 += m[7] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[3] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[13] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[11] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[12] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[14] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[1] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[9] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[2] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[5] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[4] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[15] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[0] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[8] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[10] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[6] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 5. 
- v0 += m[9] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[5] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[2] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[10] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[4] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[15] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[7] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[0] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[14] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[11] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[6] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[3] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[8] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[13] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[12] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[1] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 6. 
- v0 += m[2] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[6] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[0] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[8] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[11] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[3] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[10] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[12] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[4] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[7] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[15] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[1] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[14] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[9] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[5] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[13] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 7. 
- v0 += m[12] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[1] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[14] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[4] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[13] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[10] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[15] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[5] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[0] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[6] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[9] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[8] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[2] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[11] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[3] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[7] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 8. 
- v0 += m[13] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[7] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[12] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[3] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[1] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[9] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[14] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[11] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[5] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[15] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[8] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[2] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[6] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[10] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[4] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[0] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 9. 
- v0 += m[6] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[14] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[11] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[0] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[3] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[8] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[9] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[15] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[12] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[13] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[1] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[10] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[4] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[5] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[7] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[2] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 10. 
- v0 += m[10] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[8] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[7] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[1] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[6] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[5] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[4] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[2] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[15] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[9] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[3] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[13] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[12] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[0] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[14] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[11] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 11. 
- v0 += m[0] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[2] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[4] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[5] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[7] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[3] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[1] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[8] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[10] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[12] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[14] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[13] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[15] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[11] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[9] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - // Round 12. 
- v0 += m[14] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-32) | v12>>32 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-24) | v4>>24 - v1 += m[4] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-32) | v13>>32 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-24) | v5>>24 - v2 += m[9] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-32) | v14>>32 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-24) | v6>>24 - v3 += m[13] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-32) | v15>>32 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-24) | v7>>24 - v2 += m[15] - v2 += v6 - v14 ^= v2 - v14 = v14<<(64-16) | v14>>16 - v10 += v14 - v6 ^= v10 - v6 = v6<<(64-63) | v6>>63 - v3 += m[6] - v3 += v7 - v15 ^= v3 - v15 = v15<<(64-16) | v15>>16 - v11 += v15 - v7 ^= v11 - v7 = v7<<(64-63) | v7>>63 - v1 += m[8] - v1 += v5 - v13 ^= v1 - v13 = v13<<(64-16) | v13>>16 - v9 += v13 - v5 ^= v9 - v5 = v5<<(64-63) | v5>>63 - v0 += m[10] - v0 += v4 - v12 ^= v0 - v12 = v12<<(64-16) | v12>>16 - v8 += v12 - v4 ^= v8 - v4 = v4<<(64-63) | v4>>63 - v0 += m[1] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-32) | v15>>32 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-24) | v5>>24 - v1 += m[0] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-32) | v12>>32 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-24) | v6>>24 - v2 += m[11] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-32) | v13>>32 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-24) | v7>>24 - v3 += m[5] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-32) | v14>>32 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-24) | v4>>24 - v2 += m[7] - v2 += v7 - v13 ^= v2 - v13 = v13<<(64-16) | v13>>16 - v8 += v13 - v7 ^= v8 - v7 = v7<<(64-63) | v7>>63 - v3 += m[3] - v3 += v4 - v14 ^= v3 - v14 = v14<<(64-16) | v14>>16 - v9 += v14 - v4 ^= v9 - v4 = v4<<(64-63) | v4>>63 - v1 += m[2] - v1 += v6 - v12 ^= v1 - v12 = v12<<(64-16) | v12>>16 - v11 += v12 - v6 ^= v11 - v6 = v6<<(64-63) | v6>>63 - v0 += m[12] - v0 += v5 - v15 ^= v0 - v15 = v15<<(64-16) | v15>>16 - v10 += v15 - v5 ^= v10 - v5 = v5<<(64-63) | v5>>63 - - h0 ^= v0 ^ v8 - h1 ^= v1 ^ v9 - h2 ^= v2 ^ v10 - h3 ^= v3 ^ v11 - h4 ^= v4 ^ v12 - h5 ^= v5 ^ v13 - h6 ^= v6 ^ v14 - h7 ^= v7 ^ v15 - - p = p[BlockSize:] - } - d.h[0], d.h[1], d.h[2], d.h[3], d.h[4], d.h[5], d.h[6], d.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git a/vendor/github.com/minio/sha256-simd/.gitignore b/vendor/github.com/minio/sha256-simd/.gitignore deleted file mode 100644 index c56069f..0000000 --- a/vendor/github.com/minio/sha256-simd/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.test \ No newline at end of file diff --git a/vendor/github.com/minio/sha256-simd/LICENSE b/vendor/github.com/minio/sha256-simd/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/minio/sha256-simd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
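The twelve unrolled rounds in the deleted `compress_generic.go` above all expand the same BLAKE2b quarter-round (the G function), applied first to the four columns and then to the four diagonals of the 4x4 state, with message words chosen by each round's sigma permutation. A minimal Go sketch of what each group of eight deleted statements computes — the vendored file keeps it fully inlined for speed, and `math/bits` is used here only for readability:

```go
package blake2bsketch

import "math/bits"

// g is the BLAKE2b mixing function. a, b, c, d are four of the sixteen state
// words v0..v15; x and y are the two message words selected for this column
// or diagonal by the round's sigma permutation (m[0] and m[1] for column 0 of
// round 1 in the unrolled code above).
func g(a, b, c, d, x, y uint64) (uint64, uint64, uint64, uint64) {
	a += b + x
	d = bits.RotateLeft64(d^a, -32) // rotate right by 32
	c += d
	b = bits.RotateLeft64(b^c, -24) // rotate right by 24
	a += b + y
	d = bits.RotateLeft64(d^a, -16) // rotate right by 16
	c += d
	b = bits.RotateLeft64(b^c, -63) // rotate right by 63
	return a, b, c, d
}
```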
diff --git a/vendor/github.com/minio/sha256-simd/README.md b/vendor/github.com/minio/sha256-simd/README.md deleted file mode 100644 index 6117488..0000000 --- a/vendor/github.com/minio/sha256-simd/README.md +++ /dev/null @@ -1,137 +0,0 @@ -# sha256-simd - -Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions for x86 and ARM64 for ARM. -On AVX512 it provides an up to 8x improvement (over 3 GB/s per core). -SHA Extensions give a performance boost of close to 4x over native. - -## Introduction - -This package is designed as a replacement for `crypto/sha256`. -For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement. - -This package uses Golang assembly. -The AVX512 version is based on the Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al. - -## Support for Intel SHA Extensions - -Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)). - -``` -$ benchcmp avx2.txt sha-ext.txt -benchmark AVX2 MB/s SHA Ext MB/s speedup -BenchmarkHash5M 514.40 1975.17 3.84x -``` - -Thanks to Kristofer Peterson, we also added additional performance changes such as optimized padding, -endian conversions which sped up all implementations i.e. Intel SHA alone while doubled performance for small sizes, -the other changes increased everything roughly 50%. - -## Support for AVX512 - -We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU): - -``` -$ benchcmp avx2.txt avx512.txt -benchmark AVX2 MB/s AVX512 MB/s speedup -BenchmarkHash5M 448.62 3498.20 7.80x -``` - -The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide). - -Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message. - -Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice. 
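To make the scheduling point above concrete, a hedged sketch of the kind of many-goroutine workload that keeps all sixteen lanes supplied with independent messages; it reuses only the `NewAvx512Server` / `NewAvx512` calls that this README introduces just below, while the fan-out pattern and the `hashAll` helper are illustrative:

```go
import (
	"sync"

	"github.com/minio/sha256-simd"
)

// Many goroutines hashing unrelated messages share one AVX512 server; the
// server groups their blocks so each set of AVX512 instructions advances all
// sixteen lanes at once.
func hashAll(messages [][]byte) [][]byte {
	server := sha256.NewAvx512Server()
	sums := make([][]byte, len(messages))

	var wg sync.WaitGroup
	for i, msg := range messages {
		wg.Add(1)
		go func(i int, msg []byte) {
			defer wg.Done()
			h := sha256.NewAvx512(server) // per-goroutine hash.Hash bound to the server
			h.Write(msg)
			sums[i] = h.Sum(nil)
		}(i, msg)
	}
	wg.Wait()
	return sums
}
```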
- -Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion: - -```go -import "github.com/minio/sha256-simd" - -func main() { - server := sha256.NewAvx512Server() - h512 := sha256.NewAvx512(server) - h512.Write(fileBlock) - digest := h512.Sum([]byte{}) -} -``` - -Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance: -* Have many go routines doing SHA256 calculations in parallel. -* Try to Write() messages in multiples of 64 bytes. -* Try to keep the overall length of messages to a roughly similar size ie. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible). - -More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores. - -## Drop-In Replacement - -The following code snippet shows how you can use `github.com/minio/sha256-simd`. -This will automatically select the fastest method for the architecture on which it will be executed. - -```go -import "github.com/minio/sha256-simd" - -func main() { - ... - shaWriter := sha256.New() - io.Copy(shaWriter, file) - ... -} -``` - -## Performance - -Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB. - -| Processor | SIMD | Speed (MB/s) | -| --------------------------------- | ------- | ------------:| -| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 | -| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 | -| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 | - -## asm2plan9s - -In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. - -## Why and benefits - -One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server. - -Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc. - -## ARM SHA Extensions - -The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)). 
- - ``` - sha256h q2, q3, v9.4s - sha256h2 q3, q4, v9.4s - sha256su0 v5.4s, v6.4s - rev32 v8.16b, v8.16b - add v9.4s, v7.4s, v18.4s - mov v4.16b, v2.16b - sha256h q2, q3, v10.4s - sha256h2 q3, q4, v10.4s - sha256su0 v6.4s, v7.4s - sha256su1 v5.4s, v7.4s, v8.4s - ``` - -### Detailed benchmarks - -Benchmarks generated on a 1.2 Ghz Quad-Core ARM Cortex A53 equipped [Pine64](https://www.pine64.com/). - -``` -minio@minio-arm:$ benchcmp golang.txt arm64.txt -benchmark golang arm64 speedup -BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x -BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x -BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x -BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x -``` - -## License - -Released under the Apache License v2.0. You can find the complete text in the file LICENSE. - -## Contributing - -Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go deleted file mode 100644 index 35e5d11..0000000 --- a/vendor/github.com/minio/sha256-simd/cpuid_other.go +++ /dev/null @@ -1,20 +0,0 @@ -// Minio Cloud Storage, (C) 2021 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package sha256 - -func hasArmSha2() bool { - return false -} diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go deleted file mode 100644 index b137ead..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256.go +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "crypto/sha256" - "encoding/binary" - "hash" - "runtime" - - "github.com/klauspost/cpuid/v2" -) - -// Size - The size of a SHA256 checksum in bytes. -const Size = 32 - -// BlockSize - The blocksize of SHA256 in bytes. -const BlockSize = 64 - -const ( - chunk = BlockSize - init0 = 0x6A09E667 - init1 = 0xBB67AE85 - init2 = 0x3C6EF372 - init3 = 0xA54FF53A - init4 = 0x510E527F - init5 = 0x9B05688C - init6 = 0x1F83D9AB - init7 = 0x5BE0CD19 -) - -// digest represents the partial evaluation of a checksum. 
-type digest struct { - h [8]uint32 - x [chunk]byte - nx int - len uint64 -} - -// Reset digest back to default -func (d *digest) Reset() { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - d.nx = 0 - d.len = 0 -} - -type blockfuncType int - -const ( - blockfuncGeneric blockfuncType = iota - blockfuncSha blockfuncType = iota - blockfuncArm blockfuncType = iota -) - -var blockfunc blockfuncType - -func init() { - blockfunc = blockfuncGeneric - switch { - case hasSHAExtensions(): - blockfunc = blockfuncSha - case hasArmSha2(): - blockfunc = blockfuncArm - default: - blockfunc = blockfuncGeneric - } -} - -var avx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL) - -// hasSHAExtensions return whether the cpu supports SHA extensions. -func hasSHAExtensions() bool { - return cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4) && runtime.GOARCH == "amd64" -} - -// New returns a new hash.Hash computing the SHA256 checksum. -func New() hash.Hash { - if blockfunc != blockfuncGeneric { - d := new(digest) - d.Reset() - return d - } - // Fallback to the standard golang implementation - // if no features were found. - return sha256.New() -} - -// Sum256 - single caller sha256 helper -func Sum256(data []byte) (result [Size]byte) { - var d digest - d.Reset() - d.Write(data) - result = d.checkSum() - return -} - -// Return size of checksum -func (d *digest) Size() int { return Size } - -// Return blocksize of checksum -func (d *digest) BlockSize() int { return BlockSize } - -// Write to digest -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -// Return sha256 sum in bytes -func (d *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d0 := *d - hash := d0.checkSum() - return append(in, hash[:]...) 
-} - -// Intermediate checksum function -func (d *digest) checkSum() (digest [Size]byte) { - n := d.nx - - var k [64]byte - copy(k[:], d.x[:n]) - - k[n] = 0x80 - - if n >= 56 { - block(d, k[:]) - - // clear block buffer - go compiles this to optimal 1x xorps + 4x movups - // unfortunately expressing this more succinctly results in much worse code - k[0] = 0 - k[1] = 0 - k[2] = 0 - k[3] = 0 - k[4] = 0 - k[5] = 0 - k[6] = 0 - k[7] = 0 - k[8] = 0 - k[9] = 0 - k[10] = 0 - k[11] = 0 - k[12] = 0 - k[13] = 0 - k[14] = 0 - k[15] = 0 - k[16] = 0 - k[17] = 0 - k[18] = 0 - k[19] = 0 - k[20] = 0 - k[21] = 0 - k[22] = 0 - k[23] = 0 - k[24] = 0 - k[25] = 0 - k[26] = 0 - k[27] = 0 - k[28] = 0 - k[29] = 0 - k[30] = 0 - k[31] = 0 - k[32] = 0 - k[33] = 0 - k[34] = 0 - k[35] = 0 - k[36] = 0 - k[37] = 0 - k[38] = 0 - k[39] = 0 - k[40] = 0 - k[41] = 0 - k[42] = 0 - k[43] = 0 - k[44] = 0 - k[45] = 0 - k[46] = 0 - k[47] = 0 - k[48] = 0 - k[49] = 0 - k[50] = 0 - k[51] = 0 - k[52] = 0 - k[53] = 0 - k[54] = 0 - k[55] = 0 - k[56] = 0 - k[57] = 0 - k[58] = 0 - k[59] = 0 - k[60] = 0 - k[61] = 0 - k[62] = 0 - k[63] = 0 - } - binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3) - block(d, k[:]) - - { - const i = 0 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 1 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 2 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 3 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 4 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 5 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 6 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 7 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - - return -} - -func block(dig *digest, p []byte) { - if blockfunc == blockfuncSha { - blockShaGo(dig, p) - } else if blockfunc == blockfuncArm { - blockArmGo(dig, p) - } else if blockfunc == blockfuncGeneric { - blockGeneric(dig, p) - } -} - -func blockGeneric(dig *digest, p []byte) { - var w [64]uint32 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - // Can interlace the computation of w with the - // rounds below if needed for speed. 
- for i := 0; i < 16; i++ { - j := i * 4 - w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) - } - for i := 16; i < 64; i++ { - v1 := w[i-2] - t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) - v2 := w[i-15] - t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 64; i++ { - t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} - -var _K = []uint32{ - 0x428a2f98, - 0x71374491, - 0xb5c0fbcf, - 0xe9b5dba5, - 0x3956c25b, - 0x59f111f1, - 0x923f82a4, - 0xab1c5ed5, - 0xd807aa98, - 0x12835b01, - 0x243185be, - 0x550c7dc3, - 0x72be5d74, - 0x80deb1fe, - 0x9bdc06a7, - 0xc19bf174, - 0xe49b69c1, - 0xefbe4786, - 0x0fc19dc6, - 0x240ca1cc, - 0x2de92c6f, - 0x4a7484aa, - 0x5cb0a9dc, - 0x76f988da, - 0x983e5152, - 0xa831c66d, - 0xb00327c8, - 0xbf597fc7, - 0xc6e00bf3, - 0xd5a79147, - 0x06ca6351, - 0x14292967, - 0x27b70a85, - 0x2e1b2138, - 0x4d2c6dfc, - 0x53380d13, - 0x650a7354, - 0x766a0abb, - 0x81c2c92e, - 0x92722c85, - 0xa2bfe8a1, - 0xa81a664b, - 0xc24b8b70, - 0xc76c51a3, - 0xd192e819, - 0xd6990624, - 0xf40e3585, - 0x106aa070, - 0x19a4c116, - 0x1e376c08, - 0x2748774c, - 0x34b0bcb5, - 0x391c0cb3, - 0x4ed8aa4a, - 0x5b9cca4f, - 0x682e6ff3, - 0x748f82ee, - 0x78a5636f, - 0x84c87814, - 0x8cc70208, - 0x90befffa, - 0xa4506ceb, - 0xbef9a3f7, - 0xc67178f2, -} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm deleted file mode 100644 index c959b1a..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm +++ /dev/null @@ -1,686 +0,0 @@ - -// 16x Parallel implementation of SHA256 for AVX512 - -// -// Minio Cloud Storage, (C) 2017 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// -// This code is based on the Intel Multi-Buffer Crypto for IPSec library -// and more specifically the following implementation: -// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm -// -// For Golang it has been converted into Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble the AVX512 instructions -// - -// Copyright (c) 2017, Intel Corporation -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of Intel Corporation nor the names of its contributors -// may be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
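For orientation before the register and macro definitions that follow: the TRANSPOSE16 macro implements the "transposing" step described in the deleted README — word j of each of the sixteen independent 64-byte blocks ends up in element i of vector register j, so every subsequent vector instruction advances all sixteen digests in lockstep. A scalar Go sketch of the same data movement, purely illustrative and not the vendored implementation:

```go
// transpose16 models the data movement only: lanes[j][i] receives word j of
// message i, so row j stands in for one ZMM register holding the j-th dword
// of all sixteen message blocks.
func transpose16(msgs [16][16]uint32) (lanes [16][16]uint32) {
	for i := range msgs { // message (lane) index
		for j := range msgs[i] { // word (register) index
			lanes[j][i] = msgs[i][j]
		}
	}
	return
}
```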
- -#define SHA256_DIGEST_ROW_SIZE 64 - -// arg1 -#define STATE rdi -#define STATE_P9 DI -// arg2 -#define INP_SIZE rsi -#define INP_SIZE_P9 SI - -#define IDX rcx -#define TBL rdx -#define TBL_P9 DX - -#define INPUT rax -#define INPUT_P9 AX - -#define inp0 r9 -#define SCRATCH_P9 R12 -#define SCRATCH r12 -#define maskp r13 -#define MASKP_P9 R13 -#define mask r14 -#define MASK_P9 R14 - -#define A zmm0 -#define B zmm1 -#define C zmm2 -#define D zmm3 -#define E zmm4 -#define F zmm5 -#define G zmm6 -#define H zmm7 -#define T1 zmm8 -#define TMP0 zmm9 -#define TMP1 zmm10 -#define TMP2 zmm11 -#define TMP3 zmm12 -#define TMP4 zmm13 -#define TMP5 zmm14 -#define TMP6 zmm15 - -#define W0 zmm16 -#define W1 zmm17 -#define W2 zmm18 -#define W3 zmm19 -#define W4 zmm20 -#define W5 zmm21 -#define W6 zmm22 -#define W7 zmm23 -#define W8 zmm24 -#define W9 zmm25 -#define W10 zmm26 -#define W11 zmm27 -#define W12 zmm28 -#define W13 zmm29 -#define W14 zmm30 -#define W15 zmm31 - - -#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \ - \ - \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0} - \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0} - \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0} - \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0} - \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0} - \ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0} - \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0} - \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0} - \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0} - \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0} - \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0} - \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0} - \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0} - \ // r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0} - \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0} - \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0} - \ - \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} - \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} - \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} - \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} - \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} - \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} - \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} - \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} - \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} - \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} - \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} - \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} - \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} - \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} - \ - \ // process top half - vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0} - vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2} - vshufps _t1, _r2, _r3, 
0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0} - vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2} - \ - vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1} - vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2} - vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3} - vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0} - \ - \ // use r2 in place of t0 - vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0} - vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2} - vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0} - vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2} - \ - vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1} - vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2} - vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3} - vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0} - \ - \ // use r6 in place of t0 - vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0} - vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2} - vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0} - vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2} - \ - vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 113 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1} - vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 114 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2} - vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 115 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3} - vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 112 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0} - \ - \ // use r10 in place of t0 - vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 a1 m0} - vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 a3 m2} - vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 013 012 p9 p8 09 08 p5 p4 05 04 p1 p0 01 00} - vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 015 014 p11 p10 011 010 p7 p6 07 06 p3 p2 03 02} - \ - vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 013 n13 m13 p9 09 n9 m9 p5 05 n5 m5 p1 01 n1 m1} - vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 014 n14 m14 p10 010 n10 m10 p6 06 n6 m6 p2 02 n2 m2} - vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 015 n15 m15 p11 011 n11 m11 p7 07 n7 m7 p3 03 n3 m3} - vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 012 n12 m12 p8 08 n8 m8 p4 04 n4 m4 p0 00 n0 m0} - \ - \ // At this point, the registers that contain interesting data are: - \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12 - \ // Can use t1 and r14 as scratch registers - LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \ - LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \ - \ - vmovdqu32 _r14, [rbx] \ - vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0} - vmovdqu32 _t1, [r8] \ - vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4} - \ - vmovdqu32 _r2, 
[rbx] \ - vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1} - vmovdqu32 _t0, [r8] \ - vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5} - \ - vmovdqu32 _r3, [rbx] \ - vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2} - vmovdqu32 _r7, [r8] \ - vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6} - \ - vmovdqu32 _r1, [rbx] \ - vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3} - vmovdqu32 _r5, [r8] \ - vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7} - \ - vmovdqu32 _r0, [rbx] \ - vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0} - vmovdqu32 _r4, [r8] \ - vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4} - \ - vmovdqu32 _r6, [rbx] \ - vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1} - vmovdqu32 _r10, [r8] \ - vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5} - \ - vmovdqu32 _r11, [rbx] \ - vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2} - vmovdqu32 _r15, [r8] \ - vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6} - \ - vmovdqu32 _r9, [rbx] \ - vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3} - vmovdqu32 _r13, [r8] \ - vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7} - \ - \ // At this point r8 and r12 can be used as scratch registers - vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} - vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} - \ - vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} - vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} - \ - vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} - vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - \ - vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} - vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} - \ - vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} - vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} - \ - vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} - vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - \ - vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} - vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} - \ - vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} - vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} - \ - vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - - -// CH(A, B, C) = (A&B) ^ 
(~A&C) -// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G) -// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22 -// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25 -// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3 -// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10 - -// Main processing loop per round -#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \ - \ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt - \ // T2 = SIGMA0(A) + MAJ(A, B, C) - \ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2 - \ - \ // H becomes T2, then add T1 for A - \ // D becomes D + T1 for E - \ - vpaddd T1, _H, TMP3 \ // T1 = H + Kt - vmovdqu32 TMP0, _E \ - vprord TMP1, _E, 6 \ // ROR_6(E) - vprord TMP2, _E, 11 \ // ROR_11(E) - vprord TMP3, _E, 25 \ // ROR_25(E) - vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G) - vpaddd T1, T1, _WT \ // T1 = T1 + Wt - vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E) - vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G) - vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E) - vpaddd _D, _D, T1 \ // D = D + T1 - \ - vprord _H, _A, 2 \ // ROR_2(A) - vprord TMP2, _A, 13 \ // ROR_13(A) - vprord TMP3, _A, 22 \ // ROR_22(A) - vmovdqu32 TMP0, _A \ - vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C) - vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A) - vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C) - vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1 - \ - vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt - - -#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \ - vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2) - vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2) - vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2) - vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2) - \ - vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) - vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7 - \ - vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15) - vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15) - vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15) - vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15) - \ - vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + - \ // Wt-7 + sigma0(Wt-15) + - - -// Note this is reading in a block of data for one lane -// When all 16 are read, the data must be transposed to build msg schedule -#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \ - TESTQ $(1<(SB), TBL_P9 - vmovdqu32 TMP2, [TBL] - - // Get first K from table - MOVQ table+16(FP), TBL_P9 - vmovdqu32 TMP3, [TBL] - - // Save digests for later addition - vmovdqu32 [SCRATCH + 64*0], A - vmovdqu32 [SCRATCH + 64*1], B - vmovdqu32 [SCRATCH + 64*2], C - vmovdqu32 [SCRATCH + 64*3], D - vmovdqu32 [SCRATCH + 64*4], E - vmovdqu32 [SCRATCH + 64*5], F - vmovdqu32 [SCRATCH + 64*6], G - vmovdqu32 [SCRATCH + 64*7], H - - add IDX, 64 - - // Transpose input data - TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1) - - vpshufb W0, W0, TMP2 - vpshufb W1, W1, TMP2 - vpshufb W2, W2, TMP2 - vpshufb W3, W3, TMP2 - vpshufb W4, W4, TMP2 - vpshufb W5, W5, TMP2 - vpshufb W6, W6, TMP2 - vpshufb W7, W7, TMP2 - vpshufb W8, W8, TMP2 - vpshufb W9, W9, TMP2 - vpshufb W10, W10, TMP2 - vpshufb W11, W11, TMP2 - vpshufb W12, W12, TMP2 - vpshufb W13, W13, TMP2 - vpshufb W14, W14, TMP2 - vpshufb W15, W15, TMP2 - - // MSG Schedule for W0-W15 is now complete in registers - // Process first 48 rounds - // Calculate next Wt+16 after processing is complete and Wt is unneeded - - PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 2, G, H, A, 
B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) - PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) - PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, 
W2, W7) - PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - - // Check if this is the last block - sub INP_SIZE, 1 - JE lastLoop - - // Load next mask for inputs - ADDQ $8, MASKP_P9 - MOVQ (MASKP_P9), MASK_P9 - - // Process last 16 rounds - // Read in next block msg data for use in first 16 words of msg sched - - PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_00_15( W0, 0, skipNext0) - PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_00_15( W1, 1, skipNext1) - PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_00_15( W2, 2, skipNext2) - PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_00_15( W3, 3, skipNext3) - PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_00_15( W4, 4, skipNext4) - PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_00_15( W5, 5, skipNext5) - PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_00_15( W6, 6, skipNext6) - PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_00_15( W7, 7, skipNext7) - PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_00_15( W8, 8, skipNext8) - PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_00_15( W9, 9, skipNext9) - PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_00_15(W10, 10, skipNext10) - PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_00_15(W11, 11, skipNext11) - PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_00_15(W12, 12, skipNext12) - PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_00_15(W13, 13, skipNext13) - PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_00_15(W14, 14, skipNext14) - PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_00_15(W15, 15, skipNext15) - - // Add old digest - vmovdqu32 TMP2, A - vmovdqu32 A, [SCRATCH + 64*0] - vpaddd A{k1}, A, TMP2 - vmovdqu32 TMP2, B - vmovdqu32 B, [SCRATCH + 64*1] - vpaddd B{k1}, B, TMP2 - vmovdqu32 TMP2, C - vmovdqu32 C, [SCRATCH + 64*2] - vpaddd C{k1}, C, TMP2 - vmovdqu32 TMP2, D - vmovdqu32 D, [SCRATCH + 64*3] - vpaddd D{k1}, D, TMP2 - vmovdqu32 TMP2, E - vmovdqu32 E, [SCRATCH + 64*4] - vpaddd E{k1}, E, TMP2 - vmovdqu32 TMP2, F - vmovdqu32 F, [SCRATCH + 64*5] - vpaddd F{k1}, F, TMP2 - vmovdqu32 TMP2, G - vmovdqu32 G, [SCRATCH + 64*6] - vpaddd G{k1}, G, TMP2 - vmovdqu32 TMP2, H - vmovdqu32 H, [SCRATCH + 64*7] - vpaddd H{k1}, H, TMP2 - - kmovq k1, mask - JMP lloop - -lastLoop: - // Process last 16 rounds - PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) - PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) - PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) - PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) - PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) - PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) - PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) - PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) - PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) - PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) - PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) - PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) - 
PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) - PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) - PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) - PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) - - // Add old digest - vmovdqu32 TMP2, A - vmovdqu32 A, [SCRATCH + 64*0] - vpaddd A{k1}, A, TMP2 - vmovdqu32 TMP2, B - vmovdqu32 B, [SCRATCH + 64*1] - vpaddd B{k1}, B, TMP2 - vmovdqu32 TMP2, C - vmovdqu32 C, [SCRATCH + 64*2] - vpaddd C{k1}, C, TMP2 - vmovdqu32 TMP2, D - vmovdqu32 D, [SCRATCH + 64*3] - vpaddd D{k1}, D, TMP2 - vmovdqu32 TMP2, E - vmovdqu32 E, [SCRATCH + 64*4] - vpaddd E{k1}, E, TMP2 - vmovdqu32 TMP2, F - vmovdqu32 F, [SCRATCH + 64*5] - vpaddd F{k1}, F, TMP2 - vmovdqu32 TMP2, G - vmovdqu32 G, [SCRATCH + 64*6] - vpaddd G{k1}, G, TMP2 - vmovdqu32 TMP2, H - vmovdqu32 H, [SCRATCH + 64*7] - vpaddd H{k1}, H, TMP2 - - // Write out digest - vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A - vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B - vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C - vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D - vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E - vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F - vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G - vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H - - VZEROUPPER - RET - -// -// Tables -// - -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b -GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 - -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D -GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 - -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F -GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go deleted file mode 100644 index b7d7c16..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go +++ /dev/null @@ -1,500 +0,0 @@ -//+build !noasm,!appengine,gc - -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "encoding/binary" - "errors" - "hash" - "sort" - "sync/atomic" - "time" -) - -//go:noescape -func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte) - -// Avx512ServerUID - Do not start at 0 but next multiple of 16 so as to be able to -// differentiate with default initialiation value of 0 -const Avx512ServerUID = 16 - -var uidCounter uint64 - -// NewAvx512 - initialize sha256 Avx512 implementation. -func NewAvx512(a512srv *Avx512Server) hash.Hash { - uid := atomic.AddUint64(&uidCounter, 1) - return &Avx512Digest{uid: uid, a512srv: a512srv} -} - -// Avx512Digest - Type for computing SHA256 using Avx512 -type Avx512Digest struct { - uid uint64 - a512srv *Avx512Server - x [chunk]byte - nx int - len uint64 - final bool - result [Size]byte -} - -// Size - Return size of checksum -func (d *Avx512Digest) Size() int { return Size } - -// BlockSize - Return blocksize of checksum -func (d Avx512Digest) BlockSize() int { return BlockSize } - -// Reset - reset sha digest to its initial values -func (d *Avx512Digest) Reset() { - d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true} - d.nx = 0 - d.len = 0 - d.final = false -} - -// Write to digest -func (d *Avx512Digest) Write(p []byte) (nn int, err error) { - - if d.final { - return 0, errors.New("Avx512Digest already finalized. Reset first before writing again") - } - - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]} - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]} - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -// Sum - Return sha256 sum in bytes -func (d *Avx512Digest) Sum(in []byte) (result []byte) { - - if d.final { - return append(in, d.result[:]...) - } - - trail := make([]byte, 0, 128) - trail = append(trail, d.x[:d.nx]...) - - len := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte - tmp[0] = 0x80 - if len%64 < 56 { - trail = append(trail, tmp[0:56-len%64]...) - } else { - trail = append(trail, tmp[0:64+56-len%64]...) - } - d.nx = 0 - - // Length in bits. - len <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(len >> (56 - 8*i)) - } - trail = append(trail, tmp[0:8]...) - - sumCh := make(chan [Size]byte) - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh} - d.result = <-sumCh - d.final = true - return append(in, d.result[:]...) 
-} - -var table = [512]uint64{ - 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, - 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, - 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, - 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, - 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, - 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, - 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, - 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, - 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, - 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, - 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, - 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, - 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, - 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, - 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, - 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, - 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, - 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, - 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, - 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, - 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, - 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, - 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, - 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, - 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, - 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, - 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, - 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, - 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, - 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, - 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, - 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, - 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, - 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, - 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, - 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, - 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, - 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, - 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, - 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, - 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, - 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, - 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 
0x4a7484aa4a7484aa, - 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, - 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, - 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, - 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, - 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, - 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, - 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, - 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, - 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, - 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, - 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, - 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, - 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, - 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, - 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, - 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, - 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, - 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, - 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, - 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, - 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, - 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, - 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, - 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, - 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, - 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, - 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, - 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, - 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, - 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, - 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, - 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, - 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, - 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, - 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, - 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, - 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, - 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, - 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, - 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, - 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, - 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, - 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, - 
0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, - 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, - 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, - 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, - 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, - 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, - 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, - 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, - 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, - 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, - 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, - 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, - 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, - 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, - 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, - 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, - 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, - 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, - 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, - 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, - 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, - 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, - 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, - 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, - 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, - 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, - 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, - 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, - 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, - 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, - 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, - 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, - 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, - 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, - 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, - 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, - 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, - 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, - 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, - 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, - 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, - 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2} - -// Interface function to assembly ode -func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) 
[16][Size]byte { - - scratch := [512]byte{} - sha256X16Avx512(digests, &scratch, &table, mask, input) - - output := [16][Size]byte{} - for i := 0; i < 16; i++ { - output[i] = getDigest(i, digests[:]) - } - - return output -} - -func getDigest(index int, state []byte) (sum [Size]byte) { - for j := 0; j < 16; j += 2 { - for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size { - binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4])) - } - } - return -} - -// Message to send across input channel -type blockInput struct { - uid uint64 - msg []byte - reset bool - final bool - sumCh chan [Size]byte -} - -// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations -type Avx512Server struct { - blocksCh chan blockInput // Input channel - totalIn int // Total number of inputs waiting to be processed - lanes [16]Avx512LaneInfo // Array with info per lane (out of 16) - digests map[uint64][Size]byte // Map of uids to (interim) digest results -} - -// Avx512LaneInfo - Info for each lane -type Avx512LaneInfo struct { - uid uint64 // unique identification for this SHA processing - block []byte // input block to be processed - outputCh chan [Size]byte // channel for output result -} - -// NewAvx512Server - Create new object for parallel processing handling -func NewAvx512Server() *Avx512Server { - a512srv := &Avx512Server{} - a512srv.digests = make(map[uint64][Size]byte) - a512srv.blocksCh = make(chan blockInput) - - // Start a single thread for reading from the input channel - go a512srv.Process() - return a512srv -} - -// Process - Sole handler for reading from the input channel -func (a512srv *Avx512Server) Process() { - for { - select { - case block := <-a512srv.blocksCh: - if block.reset { - a512srv.reset(block.uid) - continue - } - index := block.uid & 0xf - // fmt.Println("Adding message:", block.uid, index) - - if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs - //fmt.Println("Invoking Blocks()") - a512srv.blocks() - } - a512srv.totalIn++ - a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg} - if block.final { - a512srv.lanes[index].outputCh = block.sumCh - } - if a512srv.totalIn == len(a512srv.lanes) { - // fmt.Println("Invoking Blocks() while FULL: ") - a512srv.blocks() - } - - // TODO: test with larger timeout - case <-time.After(1 * time.Microsecond): - for _, lane := range a512srv.lanes { - if lane.block != nil { // check if there is any input to process - // fmt.Println("Invoking Blocks() on TIMEOUT: ") - a512srv.blocks() - break // we are done - } - } - } - } -} - -// Do a reset for this calculation -func (a512srv *Avx512Server) reset(uid uint64) { - - // Check if there is a message still waiting to be processed (and remove if so) - for i, lane := range a512srv.lanes { - if lane.uid == uid { - if lane.block != nil { - a512srv.lanes[i] = Avx512LaneInfo{} // clear message - a512srv.totalIn-- - } - } - } - - // Delete entry from hash map - delete(a512srv.digests, uid) -} - -// Invoke assembly and send results back -func (a512srv *Avx512Server) blocks() { - - inputs := [16][]byte{} - for i := range inputs { - inputs[i] = a512srv.lanes[i].block - } - - mask := expandMask(genMask(inputs)) - outputs := blockAvx512(a512srv.getDigests(), inputs, mask) - - a512srv.totalIn = 0 - for i := 0; i < len(outputs); i++ { - uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh - a512srv.digests[uid] = outputs[i] - a512srv.lanes[i] = Avx512LaneInfo{} - - if outputCh != nil { - // Send back 
result - outputCh <- outputs[i] - delete(a512srv.digests, uid) // Delete entry from hashmap - } - } -} - -func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) { - a512srv.blocksCh <- blockInput{uid: uid, msg: p} - return len(p), nil -} - -// Sum - return sha256 sum in bytes for a given sum id. -func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte { - sumCh := make(chan [32]byte) - a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh} - return <-sumCh -} - -func (a512srv *Avx512Server) getDigests() *[512]byte { - digests := [512]byte{} - for i, lane := range a512srv.lanes { - a, ok := a512srv.digests[lane.uid] - if ok { - binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4])) - binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8])) - binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12])) - binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16])) - binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20])) - binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24])) - binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28])) - binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32])) - } else { - binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) - binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) - binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) - binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) - binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) - binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) - binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) - binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) - } - } - return &digests -} - -// Helper struct for sorting blocks based on length -type lane struct { - len uint - pos uint -} - -type lanes []lane - -func (lns lanes) Len() int { return len(lns) } -func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] } -func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len } - -// Helper struct for -type maskRounds struct { - mask uint64 - rounds uint64 -} - -func genMask(input [16][]byte) [16]maskRounds { - - // Sort on blocks length small to large - var sorted [16]lane - for c, inpt := range input { - sorted[c] = lane{uint(len(inpt)), uint(c)} - } - sort.Sort(lanes(sorted[:])) - - // Create mask array including 'rounds' between masks - m, round, index := uint64(0xffff), uint64(0), 0 - var mr [16]maskRounds - for _, s := range sorted { - if s.len > 0 { - if uint64(s.len)>>6 > round { - mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round} - index++ - } - round = uint64(s.len) >> 6 - } - m = m & ^(1 << uint(s.pos)) - } - - return mr -} - -// TODO: remove function -func expandMask(mr [16]maskRounds) []uint64 { - size := uint64(0) - for _, r := range mr { - size += r.rounds - } - result, index := make([]uint64, size), 0 - for _, r := range mr { - for j := uint64(0); j < r.rounds; j++ { - result[index] = r.mask - index++ - } - } - return result -} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s deleted file mode 100644 index cca534e..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s +++ /dev/null @@ -1,267 +0,0 @@ -//+build !noasm,!appengine,gc - -TEXT 
·sha256X16Avx512(SB), 7, $0 - MOVQ digests+0(FP), DI - MOVQ scratch+8(FP), R12 - MOVQ mask_len+32(FP), SI - MOVQ mask_base+24(FP), R13 - MOVQ (R13), R14 - LONG $0x92fbc1c4; BYTE $0xce - LEAQ inputs+48(FP), AX - QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07 - MOVQ table+16(FP), DX - WORD $0x3148; BYTE $0xc9 - TESTQ $(1<<0), R14 - JE skipInput0 - MOVQ 0*24(AX), R9 - LONG $0x487cc162; WORD $0x0410; BYTE $0x09 - -skipInput0: - TESTQ $(1<<1), R14 - JE skipInput1 - MOVQ 1*24(AX), R9 - LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 - -skipInput1: - TESTQ $(1<<2), R14 - JE skipInput2 - MOVQ 2*24(AX), R9 - LONG $0x487cc162; WORD $0x1410; BYTE $0x09 - -skipInput2: - TESTQ $(1<<3), R14 - JE skipInput3 - MOVQ 3*24(AX), R9 - LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 - -skipInput3: - TESTQ $(1<<4), R14 - JE skipInput4 - MOVQ 4*24(AX), R9 - LONG $0x487cc162; WORD $0x2410; BYTE $0x09 - -skipInput4: - TESTQ $(1<<5), R14 - JE skipInput5 - MOVQ 5*24(AX), R9 - LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 - -skipInput5: - TESTQ $(1<<6), R14 - JE skipInput6 - MOVQ 6*24(AX), R9 - LONG $0x487cc162; WORD $0x3410; BYTE $0x09 - -skipInput6: - TESTQ $(1<<7), R14 - JE skipInput7 - MOVQ 7*24(AX), R9 - LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 - -skipInput7: - TESTQ $(1<<8), R14 - JE skipInput8 - MOVQ 8*24(AX), R9 - LONG $0x487c4162; WORD $0x0410; BYTE $0x09 - -skipInput8: - TESTQ $(1<<9), R14 - JE skipInput9 - MOVQ 9*24(AX), R9 - LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 - -skipInput9: - TESTQ $(1<<10), R14 - JE skipInput10 - MOVQ 10*24(AX), R9 - LONG $0x487c4162; WORD $0x1410; BYTE $0x09 - -skipInput10: - TESTQ $(1<<11), R14 - JE skipInput11 - MOVQ 11*24(AX), R9 - LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 - -skipInput11: - TESTQ $(1<<12), R14 - JE skipInput12 - MOVQ 12*24(AX), R9 - LONG $0x487c4162; WORD $0x2410; BYTE $0x09 - -skipInput12: - TESTQ $(1<<13), R14 - JE skipInput13 - MOVQ 13*24(AX), R9 - LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 - -skipInput13: - TESTQ $(1<<14), R14 - JE skipInput14 - MOVQ 14*24(AX), R9 - LONG $0x487c4162; WORD $0x3410; BYTE $0x09 - -skipInput14: - TESTQ $(1<<15), R14 - JE skipInput15 - MOVQ 15*24(AX), R9 - LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 - -skipInput15: -lloop: - LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX - LONG $0x487e7162; WORD $0x1a6f - MOVQ table+16(FP), DX - QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88 - LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX - LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 - QUAD $0x2262336f487e6162; 
QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD 
$0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD 
$0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD 
$0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD 
$0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD 
$0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD 
$0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD 
$0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD 
$0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD 
$0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005 - JE lastLoop - ADDQ $8, R13 - MOVQ (R13), R14 - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31 - TESTQ $(1<<0), R14 - JE skipNext0 - MOVQ 0*24(AX), R9 - LONG $0x487cc162; WORD $0x0410; BYTE $0x09 - -skipNext0: - QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32 - TESTQ $(1<<1), R14 - JE skipNext1 - MOVQ 1*24(AX), R9 - LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 - -skipNext1: - QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33 - TESTQ $(1<<2), R14 - JE skipNext2 - MOVQ 2*24(AX), R9 - LONG $0x487cc162; WORD $0x1410; BYTE $0x09 - -skipNext2: - QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD 
$0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34 - TESTQ $(1<<3), R14 - JE skipNext3 - MOVQ 3*24(AX), R9 - LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 - -skipNext3: - QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35 - TESTQ $(1<<4), R14 - JE skipNext4 - MOVQ 4*24(AX), R9 - LONG $0x487cc162; WORD $0x2410; BYTE $0x09 - -skipNext4: - QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36 - TESTQ $(1<<5), R14 - JE skipNext5 - MOVQ 5*24(AX), R9 - LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 - -skipNext5: - QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37 - TESTQ $(1<<6), R14 - JE skipNext6 - MOVQ 6*24(AX), R9 - LONG $0x487cc162; WORD $0x3410; BYTE $0x09 - -skipNext6: - QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38 - TESTQ $(1<<7), R14 - JE skipNext7 - MOVQ 7*24(AX), R9 - LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 - -skipNext7: - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39 - TESTQ $(1<<8), R14 - JE skipNext8 - MOVQ 8*24(AX), R9 - LONG $0x487c4162; WORD $0x0410; BYTE $0x09 - -skipNext8: - QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a - TESTQ $(1<<9), R14 - JE skipNext9 - MOVQ 9*24(AX), R9 - LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 - 
-skipNext9: - QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b - TESTQ $(1<<10), R14 - JE skipNext10 - MOVQ 10*24(AX), R9 - LONG $0x487c4162; WORD $0x1410; BYTE $0x09 - -skipNext10: - QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c - TESTQ $(1<<11), R14 - JE skipNext11 - MOVQ 11*24(AX), R9 - LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 - -skipNext11: - QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d - TESTQ $(1<<12), R14 - JE skipNext12 - MOVQ 12*24(AX), R9 - LONG $0x487c4162; WORD $0x2410; BYTE $0x09 - -skipNext12: - QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e - TESTQ $(1<<13), R14 - JE skipNext13 - MOVQ 13*24(AX), R9 - LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 - -skipNext13: - QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f - TESTQ $(1<<14), R14 - JE skipNext14 - MOVQ 14*24(AX), R9 - LONG $0x487c4162; WORD $0x3410; BYTE $0x09 - -skipNext14: - QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40 - TESTQ $(1<<15), R14 - JE skipNext15 - MOVQ 15*24(AX), R9 - LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 - -skipNext15: - QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD 
$0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1 - JMP lloop - -lastLoop: - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD 
$0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD 
$0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f - VZEROUPPER - RET - -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b -GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D -GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F -GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go deleted file mode 100644 index bef9494..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go +++ /dev/null @@ -1,6 +0,0 @@ -//+build !noasm,!appengine,gc - -package sha256 - -//go:noescape -func blockSha(h *[8]uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s deleted file mode 100644 index 14cf2c6..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s +++ /dev/null @@ -1,266 +0,0 @@ -//+build !noasm,!appengine,gc - -// SHA intrinsic version of SHA256 - -// Kristofer Peterson, (C) 2018. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "textflag.h" - -DATA K<>+0x00(SB)/4, $0x428a2f98 -DATA K<>+0x04(SB)/4, $0x71374491 -DATA K<>+0x08(SB)/4, $0xb5c0fbcf -DATA K<>+0x0c(SB)/4, $0xe9b5dba5 -DATA K<>+0x10(SB)/4, $0x3956c25b -DATA K<>+0x14(SB)/4, $0x59f111f1 -DATA K<>+0x18(SB)/4, $0x923f82a4 -DATA K<>+0x1c(SB)/4, $0xab1c5ed5 -DATA K<>+0x20(SB)/4, $0xd807aa98 -DATA K<>+0x24(SB)/4, $0x12835b01 -DATA K<>+0x28(SB)/4, $0x243185be -DATA K<>+0x2c(SB)/4, $0x550c7dc3 -DATA K<>+0x30(SB)/4, $0x72be5d74 -DATA K<>+0x34(SB)/4, $0x80deb1fe -DATA K<>+0x38(SB)/4, $0x9bdc06a7 -DATA K<>+0x3c(SB)/4, $0xc19bf174 -DATA K<>+0x40(SB)/4, $0xe49b69c1 -DATA K<>+0x44(SB)/4, $0xefbe4786 -DATA K<>+0x48(SB)/4, $0x0fc19dc6 -DATA K<>+0x4c(SB)/4, $0x240ca1cc -DATA K<>+0x50(SB)/4, $0x2de92c6f -DATA K<>+0x54(SB)/4, $0x4a7484aa -DATA K<>+0x58(SB)/4, $0x5cb0a9dc -DATA K<>+0x5c(SB)/4, $0x76f988da -DATA K<>+0x60(SB)/4, $0x983e5152 -DATA K<>+0x64(SB)/4, $0xa831c66d -DATA K<>+0x68(SB)/4, $0xb00327c8 -DATA K<>+0x6c(SB)/4, $0xbf597fc7 -DATA K<>+0x70(SB)/4, $0xc6e00bf3 -DATA K<>+0x74(SB)/4, $0xd5a79147 -DATA K<>+0x78(SB)/4, $0x06ca6351 -DATA K<>+0x7c(SB)/4, $0x14292967 -DATA K<>+0x80(SB)/4, $0x27b70a85 -DATA K<>+0x84(SB)/4, $0x2e1b2138 -DATA K<>+0x88(SB)/4, $0x4d2c6dfc -DATA K<>+0x8c(SB)/4, $0x53380d13 -DATA K<>+0x90(SB)/4, $0x650a7354 -DATA K<>+0x94(SB)/4, $0x766a0abb -DATA K<>+0x98(SB)/4, $0x81c2c92e -DATA K<>+0x9c(SB)/4, $0x92722c85 -DATA K<>+0xa0(SB)/4, $0xa2bfe8a1 -DATA K<>+0xa4(SB)/4, $0xa81a664b -DATA K<>+0xa8(SB)/4, $0xc24b8b70 -DATA K<>+0xac(SB)/4, $0xc76c51a3 -DATA K<>+0xb0(SB)/4, $0xd192e819 -DATA K<>+0xb4(SB)/4, $0xd6990624 -DATA K<>+0xb8(SB)/4, $0xf40e3585 -DATA K<>+0xbc(SB)/4, $0x106aa070 -DATA K<>+0xc0(SB)/4, $0x19a4c116 -DATA K<>+0xc4(SB)/4, $0x1e376c08 -DATA K<>+0xc8(SB)/4, $0x2748774c -DATA K<>+0xcc(SB)/4, $0x34b0bcb5 -DATA K<>+0xd0(SB)/4, $0x391c0cb3 -DATA K<>+0xd4(SB)/4, $0x4ed8aa4a -DATA K<>+0xd8(SB)/4, $0x5b9cca4f -DATA K<>+0xdc(SB)/4, $0x682e6ff3 -DATA K<>+0xe0(SB)/4, $0x748f82ee -DATA K<>+0xe4(SB)/4, $0x78a5636f -DATA K<>+0xe8(SB)/4, $0x84c87814 -DATA K<>+0xec(SB)/4, $0x8cc70208 -DATA K<>+0xf0(SB)/4, $0x90befffa -DATA K<>+0xf4(SB)/4, $0xa4506ceb -DATA K<>+0xf8(SB)/4, $0xbef9a3f7 -DATA K<>+0xfc(SB)/4, $0xc67178f2 -GLOBL K<>(SB), RODATA|NOPTR, $256 - -DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203 -DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b -GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16 - -// Register Usage -// BX base address of constant table (constant) -// DX hash_state (constant) -// SI hash_data.data -// DI hash_data.data + hash_data.length - 64 (constant) -// X0 scratch -// X1 scratch -// X2 working hash state // ABEF -// X3 working hash state // CDGH -// X4 first 16 bytes of block -// X5 second 16 bytes of block -// X6 third 16 bytes of block -// X7 fourth 16 bytes of block -// X12 saved hash state // ABEF -// X13 saved hash state // CDGH -// X15 data shuffle mask (constant) - -TEXT ·blockSha(SB), NOSPLIT, $0-32 - MOVQ h+0(FP), DX - MOVQ message_base+8(FP), SI - MOVQ message_len+16(FP), DI - LEAQ -64(SI)(DI*1), DI - MOVOU (DX), X2 - MOVOU 16(DX), X1 - MOVO X2, X3 - PUNPCKLLQ X1, X2 - PUNPCKHLQ X1, X3 - PSHUFD 
$0x27, X2, X2 - PSHUFD $0x27, X3, X3 - MOVO SHUF_MASK<>(SB), X15 - LEAQ K<>(SB), BX - - JMP TEST - -LOOP: - MOVO X2, X12 - MOVO X3, X13 - - // load block and shuffle - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOU 32(SI), X6 - MOVOU 48(SI), X7 - PSHUFB X15, X4 - PSHUFB X15, X5 - PSHUFB X15, X6 - PSHUFB X15, X7 - -#define ROUND456 \ - PADDL X5, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X5, X1 \ - LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4 - PADDL X1, X6 \ - LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 - -#define ROUND567 \ - PADDL X6, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X6, X1 \ - LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4 - PADDL X1, X7 \ - LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 - -#define ROUND674 \ - PADDL X7, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X7, X1 \ - LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4 - PADDL X1, X4 \ - LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7 - -#define ROUND745 \ - PADDL X4, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X4, X1 \ - LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4 - PADDL X1, X5 \ - LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4 - - // rounds 0-3 - MOVO (BX), X0 - PADDL X4, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 4-7 - MOVO 1*16(BX), X0 - PADDL X5, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 - - // rounds 8-11 - MOVO 2*16(BX), X0 - PADDL X6, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 - - MOVO 3*16(BX), X0; ROUND674 // rounds 12-15 - MOVO 4*16(BX), X0; ROUND745 // rounds 16-19 - MOVO 5*16(BX), X0; ROUND456 // rounds 20-23 - MOVO 6*16(BX), X0; ROUND567 // rounds 24-27 - MOVO 7*16(BX), X0; ROUND674 // rounds 28-31 - MOVO 8*16(BX), X0; ROUND745 // rounds 32-35 - MOVO 9*16(BX), X0; ROUND456 // rounds 36-39 - MOVO 10*16(BX), X0; ROUND567 // rounds 40-43 - MOVO 11*16(BX), X0; ROUND674 // rounds 44-47 - MOVO 12*16(BX), X0; ROUND745 // rounds 48-51 - - // rounds 52-55 - MOVO 13*16(BX), X0 - PADDL X5, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - MOVO X5, X1 - LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4 - PADDL X1, X6 - LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 56-59 - MOVO 14*16(BX), X0 - PADDL X6, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - MOVO X6, X1 - LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4 - PADDL X1, X7 - LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 60-63 - MOVO 15*16(BX), X0 - PADDL X7, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - PADDL X12, X2 - PADDL X13, X3 - - ADDQ $64, SI - -TEST: - 
CMPQ SI, DI - JBE LOOP - - PSHUFD $0x4e, X3, X0 - LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0 - PSHUFD $0x4e, X2, X1 - LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f - PSHUFD $0x1b, X0, X0 - PSHUFD $0x1b, X1, X1 - - MOVOU X0, (DX) - MOVOU X1, 16(DX) - - RET diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go deleted file mode 100644 index 0c48d45..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go +++ /dev/null @@ -1,27 +0,0 @@ -//+build !noasm,!appengine,gc - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -func blockArmGo(dig *digest, p []byte) { - panic("blockArmGo called unexpectedly") -} - -func blockShaGo(dig *digest, p []byte) { - blockSha(&dig.h, p) -} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go deleted file mode 100644 index 58ccf6e..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go +++ /dev/null @@ -1,36 +0,0 @@ -//+build !noasm,!appengine,gc - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -func blockShaGo(dig *digest, p []byte) { - panic("blockShaGoc called unexpectedly") -} - -//go:noescape -func blockArm(h []uint32, message []uint8) - -func blockArmGo(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockArm(h[:], p[:]) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], - h[5], h[6], h[7] -} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s deleted file mode 100644 index d85170d..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s +++ /dev/null @@ -1,192 +0,0 @@ -//+build !noasm,!appengine,gc - -// ARM64 version of SHA256 - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// -// Based on implementation as found in https://github.com/jocover/sha256-armv8 -// -// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to -// their Plan9 equivalents -// - -TEXT ·blockArm(SB), 7, $0 - MOVD h+0(FP), R0 - MOVD message+24(FP), R1 - MOVD message_len+32(FP), R2 // length of message - SUBS $64, R2 - BMI complete - - // Load constants table pointer - MOVD $·constants(SB), R3 - - // Cache constants table in registers v16 - v31 - WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64 - WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16 - WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64 - - WORD $0x4c407801 // ld1 {v1.4s}, [x0] - WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64 - WORD $0xd1004000 // sub x0, x0, #0x10 - WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64 - -loop: - // Main loop - WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64 - WORD $0x4ea01c02 // mov v2.16b, v0.16b - WORD $0x4ea11c23 // mov v3.16b, v1.16b - WORD $0x6e2008a5 // rev32 v5.16b, v5.16b - WORD $0x6e2008c6 // rev32 v6.16b, v6.16b - WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s - WORD $0x6e2008e7 // rev32 v7.16b, v7.16b - WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x6e200908 // rev32 v8.16b, v8.16b - WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4eb484a9 // add v9.4s, v5.4s, v20.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s - WORD 
$0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s - WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s - - SUBS $64, R2 - BPL loop - - // Store result - WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0] - -complete: - RET - -// Constants table -DATA ·constants+0x0(SB)/8, $0x71374491428a2f98 -DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf -DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b -DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4 -DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98 -DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be -DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74 -DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7 -DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1 -DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6 -DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f -DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc -DATA ·constants+0x60(SB)/8, $0xa831c66d983e5152 -DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8 -DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3 -DATA ·constants+0x78(SB)/8, $0x1429296706ca6351 -DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85 -DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc -DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354 -DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e -DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1 -DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70 -DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819 -DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585 -DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116 -DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c -DATA 
·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f -DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee -DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814 -DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa -DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7 - -GLOBL ·constants(SB), 8, $256 - diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go deleted file mode 100644 index ec586c0..0000000 --- a/vendor/github.com/minio/sha256-simd/sha256block_other.go +++ /dev/null @@ -1,28 +0,0 @@ -//+build appengine noasm !amd64,!arm64 !gc - -/* - * Minio Cloud Storage, (C) 2019 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -func blockShaGo(dig *digest, p []byte) { - panic("blockShaGo called unexpectedly") - -} - -func blockArmGo(dig *digest, p []byte) { - panic("blockArmGo called unexpectedly") -} diff --git a/vendor/github.com/minio/sha256-simd/test-architectures.sh b/vendor/github.com/minio/sha256-simd/test-architectures.sh deleted file mode 100644 index 50150ea..0000000 --- a/vendor/github.com/minio/sha256-simd/test-architectures.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -e - -go tool dist list | while IFS=/ read os arch; do - echo "Checking $os/$arch..." - echo " normal" - GOARCH=$arch GOOS=$os go build -o /dev/null ./... - echo " noasm" - GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./... - echo " appengine" - GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./... - echo " noasm,appengine" - GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./... -done diff --git a/vendor/github.com/mr-tron/base58/LICENSE b/vendor/github.com/mr-tron/base58/LICENSE deleted file mode 100644 index cb7829a..0000000 --- a/vendor/github.com/mr-tron/base58/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -MIT License - -Copyright (c) 2017 Denis Subbotin -Copyright (c) 2017 Nika Jones -Copyright (c) 2017 Philip Schlump - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mr-tron/base58/base58/DEPRECATED.md b/vendor/github.com/mr-tron/base58/base58/DEPRECATED.md deleted file mode 100644 index 0cc7ec7..0000000 --- a/vendor/github.com/mr-tron/base58/base58/DEPRECATED.md +++ /dev/null @@ -1,4 +0,0 @@ -Files from this directory was copied to level up directory -========================================================== - -Now all development will be on top level \ No newline at end of file diff --git a/vendor/github.com/mr-tron/base58/base58/alphabet.go b/vendor/github.com/mr-tron/base58/base58/alphabet.go deleted file mode 100644 index a0f8878..0000000 --- a/vendor/github.com/mr-tron/base58/base58/alphabet.go +++ /dev/null @@ -1,31 +0,0 @@ -package base58 - -// Alphabet is a a b58 alphabet. -type Alphabet struct { - decode [128]int8 - encode [58]byte -} - -// NewAlphabet creates a new alphabet from the passed string. -// -// It panics if the passed string is not 58 bytes long or isn't valid ASCII. -func NewAlphabet(s string) *Alphabet { - if len(s) != 58 { - panic("base58 alphabets must be 58 bytes long") - } - ret := new(Alphabet) - copy(ret.encode[:], s) - for i := range ret.decode { - ret.decode[i] = -1 - } - for i, b := range ret.encode { - ret.decode[b] = int8(i) - } - return ret -} - -// BTCAlphabet is the bitcoin base58 alphabet. -var BTCAlphabet = NewAlphabet("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") - -// FlickrAlphabet is the flickr base58 alphabet. -var FlickrAlphabet = NewAlphabet("123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ") diff --git a/vendor/github.com/mr-tron/base58/base58/base58.go b/vendor/github.com/mr-tron/base58/base58/base58.go deleted file mode 100644 index 0bbdfc0..0000000 --- a/vendor/github.com/mr-tron/base58/base58/base58.go +++ /dev/null @@ -1,261 +0,0 @@ -package base58 - -import ( - "fmt" - "math/big" -) - -var ( - bn0 = big.NewInt(0) - bn58 = big.NewInt(58) -) - -// Encode encodes the passed bytes into a base58 encoded string. -func Encode(bin []byte) string { - return FastBase58Encoding(bin) -} - -// EncodeAlphabet encodes the passed bytes into a base58 encoded string with the -// passed alphabet. -func EncodeAlphabet(bin []byte, alphabet *Alphabet) string { - return FastBase58EncodingAlphabet(bin, alphabet) -} - -// FastBase58Encoding encodes the passed bytes into a base58 encoded string. -func FastBase58Encoding(bin []byte) string { - return FastBase58EncodingAlphabet(bin, BTCAlphabet) -} - -// FastBase58EncodingAlphabet encodes the passed bytes into a base58 encoded -// string with the passed alphabet. 
-func FastBase58EncodingAlphabet(bin []byte, alphabet *Alphabet) string { - zero := alphabet.encode[0] - - binsz := len(bin) - var i, j, zcount, high int - var carry uint32 - - for zcount < binsz && bin[zcount] == 0 { - zcount++ - } - - size := ((binsz-zcount)*138/100 + 1) - - // allocate one big buffer up front - buf := make([]byte, size*2+zcount) - - // use the second half for the temporary buffer - tmp := buf[size+zcount:] - - high = size - 1 - for i = zcount; i < binsz; i++ { - j = size - 1 - for carry = uint32(bin[i]); j > high || carry != 0; j-- { - carry = carry + 256*uint32(tmp[j]) - tmp[j] = byte(carry % 58) - carry /= 58 - } - high = j - } - - for j = 0; j < size && tmp[j] == 0; j++ { - } - - // Use the first half for the result - b58 := buf[:size-j+zcount] - - if zcount != 0 { - for i = 0; i < zcount; i++ { - b58[i] = zero - } - } - - for i = zcount; j < size; i++ { - b58[i] = alphabet.encode[tmp[j]] - j++ - } - - return string(b58) -} - -// TrivialBase58Encoding encodes the passed bytes into a base58 encoded string -// (inefficiently). -func TrivialBase58Encoding(a []byte) string { - return TrivialBase58EncodingAlphabet(a, BTCAlphabet) -} - -// TrivialBase58EncodingAlphabet encodes the passed bytes into a base58 encoded -// string (inefficiently) with the passed alphabet. -func TrivialBase58EncodingAlphabet(a []byte, alphabet *Alphabet) string { - zero := alphabet.encode[0] - idx := len(a)*138/100 + 1 - buf := make([]byte, idx) - bn := new(big.Int).SetBytes(a) - var mo *big.Int - for bn.Cmp(bn0) != 0 { - bn, mo = bn.DivMod(bn, bn58, new(big.Int)) - idx-- - buf[idx] = alphabet.encode[mo.Int64()] - } - for i := range a { - if a[i] != 0 { - break - } - idx-- - buf[idx] = zero - } - return string(buf[idx:]) -} - -// Decode decodes the base58 encoded bytes. -func Decode(str string) ([]byte, error) { - return FastBase58Decoding(str) -} - -// DecodeAlphabet decodes the base58 encoded bytes using the given b58 alphabet. -func DecodeAlphabet(str string, alphabet *Alphabet) ([]byte, error) { - return FastBase58DecodingAlphabet(str, alphabet) -} - -// FastBase58Decoding decodes the base58 encoded bytes. -func FastBase58Decoding(str string) ([]byte, error) { - return FastBase58DecodingAlphabet(str, BTCAlphabet) -} - -// FastBase58DecodingAlphabet decodes the base58 encoded bytes using the given -// b58 alphabet. 
-func FastBase58DecodingAlphabet(str string, alphabet *Alphabet) ([]byte, error) { - if len(str) == 0 { - return nil, fmt.Errorf("zero length string") - } - - var ( - t uint64 - zmask, c uint32 - zcount int - - b58u = []rune(str) - b58sz = len(b58u) - - outisz = (b58sz + 3) / 4 // check to see if we need to change this buffer size to optimize - binu = make([]byte, (b58sz+3)*3) - bytesleft = b58sz % 4 - - zero = rune(alphabet.encode[0]) - ) - - if bytesleft > 0 { - zmask = (0xffffffff << uint32(bytesleft*8)) - } else { - bytesleft = 4 - } - - var outi = make([]uint32, outisz) - - for i := 0; i < b58sz && b58u[i] == zero; i++ { - zcount++ - } - - for _, r := range b58u { - if r > 127 { - return nil, fmt.Errorf("High-bit set on invalid digit") - } - if alphabet.decode[r] == -1 { - return nil, fmt.Errorf("Invalid base58 digit (%q)", r) - } - - c = uint32(alphabet.decode[r]) - - for j := (outisz - 1); j >= 0; j-- { - t = uint64(outi[j])*58 + uint64(c) - c = uint32(t>>32) & 0x3f - outi[j] = uint32(t & 0xffffffff) - } - - if c > 0 { - return nil, fmt.Errorf("Output number too big (carry to the next int32)") - } - - if outi[0]&zmask != 0 { - return nil, fmt.Errorf("Output number too big (last int32 filled too far)") - } - } - - // the nested for-loop below is the same as the original code: - // switch (bytesleft) { - // case 3: - // *(binu++) = (outi[0] & 0xff0000) >> 16; - // //-fallthrough - // case 2: - // *(binu++) = (outi[0] & 0xff00) >> 8; - // //-fallthrough - // case 1: - // *(binu++) = (outi[0] & 0xff); - // ++j; - // //-fallthrough - // default: - // break; - // } - // - // for (; j < outisz; ++j) - // { - // *(binu++) = (outi[j] >> 0x18) & 0xff; - // *(binu++) = (outi[j] >> 0x10) & 0xff; - // *(binu++) = (outi[j] >> 8) & 0xff; - // *(binu++) = (outi[j] >> 0) & 0xff; - // } - var j, cnt int - for j, cnt = 0, 0; j < outisz; j++ { - for mask := byte(bytesleft-1) * 8; mask <= 0x18; mask, cnt = mask-8, cnt+1 { - binu[cnt] = byte(outi[j] >> mask) - } - if j == 0 { - bytesleft = 4 // because it could be less than 4 the first time through - } - } - - for n, v := range binu { - if v > 0 { - start := n - zcount - if start < 0 { - start = 0 - } - return binu[start:cnt], nil - } - } - return binu[:cnt], nil -} - -// TrivialBase58Decoding decodes the base58 encoded bytes (inefficiently). -func TrivialBase58Decoding(str string) ([]byte, error) { - return TrivialBase58DecodingAlphabet(str, BTCAlphabet) -} - -// TrivialBase58DecodingAlphabet decodes the base58 encoded bytes -// (inefficiently) using the given b58 alphabet. -func TrivialBase58DecodingAlphabet(str string, alphabet *Alphabet) ([]byte, error) { - zero := alphabet.encode[0] - - var zcount int - for i := 0; i < len(str) && str[i] == zero; i++ { - zcount++ - } - leading := make([]byte, zcount) - - var padChar rune = -1 - src := []byte(str) - j := 0 - for ; j < len(src) && src[j] == byte(padChar); j++ { - } - - n := new(big.Int) - for i := range src[j:] { - c := alphabet.decode[src[i]] - if c == -1 { - return nil, fmt.Errorf("illegal base58 data at input index: %d", i) - } - n.Mul(n, bn58) - n.Add(n, big.NewInt(int64(c))) - } - return append(leading, n.Bytes()...), nil -} diff --git a/vendor/github.com/multiformats/go-base32/LICENSE b/vendor/github.com/multiformats/go-base32/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/github.com/multiformats/go-base32/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
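For quick reference while removing the vendored mr-tron/base58 package above, here is a minimal, hedged usage sketch; it assumes the module is still importable as github.com/mr-tron/base58/base58 and relies only on the Encode, Decode and EncodeAlphabet functions visible in the deleted base58.go.

```go
package main

import (
	"fmt"

	b58 "github.com/mr-tron/base58/base58"
)

func main() {
	data := []byte{0x00, 0x01, 0xff}

	// Fast path with the default (Bitcoin) alphabet.
	s := b58.Encode(data)
	fmt.Println(s)

	// Decoding returns the original bytes, or an error on invalid digits.
	out, err := b58.Decode(s)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)

	// The same data with the Flickr alphabet.
	fmt.Println(b58.EncodeAlphabet(data, b58.FlickrAlphabet))
}
```

Note that leading zero bytes are preserved by mapping each one to the alphabet's zero digit ('1' for the Bitcoin alphabet), which is why the encoder counts them up front.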
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/multiformats/go-base32/base32.go b/vendor/github.com/multiformats/go-base32/base32.go deleted file mode 100644 index 768a235..0000000 --- a/vendor/github.com/multiformats/go-base32/base32.go +++ /dev/null @@ -1,505 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package base32 implements base32 encoding as specified by RFC 4648. -package base32 - -import ( - "io" - "strconv" -) - -/* - * Encodings - */ - -// An Encoding is a radix 32 encoding/decoding scheme, defined by a -// 32-character alphabet. The most common is the "base32" encoding -// introduced for SASL GSSAPI and standardized in RFC 4648. -// The alternate "base32hex" encoding is used in DNSSEC. -type Encoding struct { - encode string - decodeMap [256]byte - padChar rune -} - -// Alphabet returns the Base32 alphabet used -func (enc *Encoding) Alphabet() string { - return enc.encode -} - -const ( - StdPadding rune = '=' - NoPadding rune = -1 -) - -const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" -const encodeHex = "0123456789ABCDEFGHIJKLMNOPQRSTUV" - -// NewEncoding returns a new Encoding defined by the given alphabet, -// which must be a 32-byte string. -func NewEncoding(encoder string) *Encoding { - e := new(Encoding) - e.padChar = StdPadding - e.encode = encoder - for i := 0; i < len(e.decodeMap); i++ { - e.decodeMap[i] = 0xFF - } - for i := 0; i < len(encoder); i++ { - e.decodeMap[encoder[i]] = byte(i) - } - return e -} - -// NewEncoding returns a new case insensitive Encoding defined by the -// given alphabet, which must be a 32-byte string. 
-func NewEncodingCI(encoder string) *Encoding { - e := new(Encoding) - e.padChar = StdPadding - e.encode = encoder - for i := 0; i < len(e.decodeMap); i++ { - e.decodeMap[i] = 0xFF - } - for i := 0; i < len(encoder); i++ { - e.decodeMap[asciiToLower(encoder[i])] = byte(i) - e.decodeMap[asciiToUpper(encoder[i])] = byte(i) - } - return e -} - -func asciiToLower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c + 32 - } - return c -} - -func asciiToUpper(c byte) byte { - if c >= 'a' && c <= 'z' { - return c - 32 - } - return c -} - -// WithPadding creates a new encoding identical to enc except -// with a specified padding character, or NoPadding to disable padding. -func (enc Encoding) WithPadding(padding rune) *Encoding { - enc.padChar = padding - return &enc -} - -// StdEncoding is the standard base32 encoding, as defined in -// RFC 4648. -var StdEncoding = NewEncodingCI(encodeStd) - -// HexEncoding is the ``Extended Hex Alphabet'' defined in RFC 4648. -// It is typically used in DNS. -var HexEncoding = NewEncodingCI(encodeHex) - -var RawStdEncoding = NewEncodingCI(encodeStd).WithPadding(NoPadding) -var RawHexEncoding = NewEncodingCI(encodeHex).WithPadding(NoPadding) - -/* - * Encoder - */ - -// Encode encodes src using the encoding enc, writing -// EncodedLen(len(src)) bytes to dst. -// -// The encoding pads the output to a multiple of 8 bytes, -// so Encode is not appropriate for use on individual blocks -// of a large data stream. Use NewEncoder() instead. -func (enc *Encoding) Encode(dst, src []byte) { - if len(src) == 0 { - return - } - - for len(src) > 0 { - var carry byte - - // Unpack 8x 5-bit source blocks into a 5 byte - // destination quantum - switch len(src) { - default: - dst[7] = enc.encode[src[4]&0x1F] - carry = src[4] >> 5 - fallthrough - case 4: - dst[6] = enc.encode[carry|(src[3]<<3)&0x1F] - dst[5] = enc.encode[(src[3]>>2)&0x1F] - carry = src[3] >> 7 - fallthrough - case 3: - dst[4] = enc.encode[carry|(src[2]<<1)&0x1F] - carry = (src[2] >> 4) & 0x1F - fallthrough - case 2: - dst[3] = enc.encode[carry|(src[1]<<4)&0x1F] - dst[2] = enc.encode[(src[1]>>1)&0x1F] - carry = (src[1] >> 6) & 0x1F - fallthrough - case 1: - dst[1] = enc.encode[carry|(src[0]<<2)&0x1F] - dst[0] = enc.encode[src[0]>>3] - } - - // Pad the final quantum - if len(src) < 5 { - if enc.padChar != NoPadding { - dst[7] = byte(enc.padChar) - if len(src) < 4 { - dst[6] = byte(enc.padChar) - dst[5] = byte(enc.padChar) - if len(src) < 3 { - dst[4] = byte(enc.padChar) - if len(src) < 2 { - dst[3] = byte(enc.padChar) - dst[2] = byte(enc.padChar) - } - } - } - } - break - } - src = src[5:] - dst = dst[8:] - } -} - -// EncodeToString returns the base32 encoding of src. -func (enc *Encoding) EncodeToString(src []byte) string { - buf := make([]byte, enc.EncodedLen(len(src))) - enc.Encode(buf, src) - return string(buf) -} - -type encoder struct { - err error - enc *Encoding - w io.Writer - buf [5]byte // buffered data waiting to be encoded - nbuf int // number of bytes in buf - out [1024]byte // output buffer -} - -func (e *encoder) Write(p []byte) (n int, err error) { - if e.err != nil { - return 0, e.err - } - - // Leading fringe. - if e.nbuf > 0 { - var i int - for i = 0; i < len(p) && e.nbuf < 5; i++ { - e.buf[e.nbuf] = p[i] - e.nbuf++ - } - n += i - p = p[i:] - if e.nbuf < 5 { - return - } - e.enc.Encode(e.out[0:], e.buf[0:]) - if _, e.err = e.w.Write(e.out[0:8]); e.err != nil { - return n, e.err - } - e.nbuf = 0 - } - - // Large interior chunks. 
- for len(p) >= 5 { - nn := len(e.out) / 8 * 5 - if nn > len(p) { - nn = len(p) - nn -= nn % 5 - } - e.enc.Encode(e.out[0:], p[0:nn]) - if _, e.err = e.w.Write(e.out[0 : nn/5*8]); e.err != nil { - return n, e.err - } - n += nn - p = p[nn:] - } - - // Trailing fringe. - for i := 0; i < len(p); i++ { - e.buf[i] = p[i] - } - e.nbuf = len(p) - n += len(p) - return -} - -// Close flushes any pending output from the encoder. -// It is an error to call Write after calling Close. -func (e *encoder) Close() error { - // If there's anything left in the buffer, flush it out - if e.err == nil && e.nbuf > 0 { - e.enc.Encode(e.out[0:], e.buf[0:e.nbuf]) - e.nbuf = 0 - _, e.err = e.w.Write(e.out[0:8]) - } - return e.err -} - -// NewEncoder returns a new base32 stream encoder. Data written to -// the returned writer will be encoded using enc and then written to w. -// Base32 encodings operate in 5-byte blocks; when finished -// writing, the caller must Close the returned encoder to flush any -// partially written blocks. -func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser { - return &encoder{enc: enc, w: w} -} - -// EncodedLen returns the length in bytes of the base32 encoding -// of an input buffer of length n. -func (enc *Encoding) EncodedLen(n int) int { - if enc.padChar == NoPadding { - return (n*8 + 4) / 5 // minimum # chars at 5 bits per char - } - return (n + 4) / 5 * 8 -} - -/* - * Decoder - */ - -type CorruptInputError int64 - -func (e CorruptInputError) Error() string { - return "illegal base32 data at input byte " + strconv.FormatInt(int64(e), 10) -} - -// decode is like Decode but returns an additional 'end' value, which -// indicates if end-of-message padding was encountered and thus any -// additional data is an error. This method assumes that src has been -// stripped of all supported whitespace ('\r' and '\n'). -func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) { - olen := len(src) - for len(src) > 0 && !end { - // Decode quantum using the base32 alphabet - var dbuf [8]byte - dlen := 8 - - for j := 0; j < 8; { - if len(src) == 0 { - if enc.padChar != NoPadding { - return n, false, CorruptInputError(olen - len(src) - j) - } - dlen = j - break - } - in := src[0] - src = src[1:] - if in == byte(enc.padChar) && j >= 2 && len(src) < 8 { - if enc.padChar == NoPadding { - return n, false, CorruptInputError(olen) - } - - // We've reached the end and there's padding - if len(src)+j < 8-1 { - // not enough padding - return n, false, CorruptInputError(olen) - } - for k := 0; k < 8-1-j; k++ { - if len(src) > k && src[k] != byte(enc.padChar) { - // incorrect padding - return n, false, CorruptInputError(olen - len(src) + k - 1) - } - } - dlen, end = j, true - // 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not - // valid dlen values. See RFC 4648 Section 6 "Base 32 Encoding" listing - // the five valid padding lengths, and Section 9 "Illustrations and - // Examples" for an illustration for how the 1st, 3rd and 6th base32 - // src bytes do not yield enough information to decode a dst byte. 
- if dlen == 1 || dlen == 3 || dlen == 6 { - return n, false, CorruptInputError(olen - len(src) - 1) - } - break - } - dbuf[j] = enc.decodeMap[in] - if dbuf[j] == 0xFF { - return n, false, CorruptInputError(olen - len(src) - 1) - } - j++ - } - - // Pack 8x 5-bit source blocks into 5 byte destination - // quantum - switch dlen { - case 8: - dst[4] = dbuf[6]<<5 | dbuf[7] - fallthrough - case 7: - dst[3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3 - fallthrough - case 5: - dst[2] = dbuf[3]<<4 | dbuf[4]>>1 - fallthrough - case 4: - dst[1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4 - fallthrough - case 2: - dst[0] = dbuf[0]<<3 | dbuf[1]>>2 - } - - if len(dst) > 5 { - dst = dst[5:] - } - - switch dlen { - case 2: - n += 1 - case 4: - n += 2 - case 5: - n += 3 - case 7: - n += 4 - case 8: - n += 5 - } - } - return n, end, nil -} - -// Decode decodes src using the encoding enc. It writes at most -// DecodedLen(len(src)) bytes to dst and returns the number of bytes -// written. If src contains invalid base32 data, it will return the -// number of bytes successfully written and CorruptInputError. -// New line characters (\r and \n) are ignored. -func (enc *Encoding) Decode(dst, s []byte) (n int, err error) { - // FIXME: if dst is the same as s use decodeInPlace - stripped := make([]byte, 0, len(s)) - for _, c := range s { - if c != '\r' && c != '\n' { - stripped = append(stripped, c) - } - } - n, _, err = enc.decode(dst, stripped) - return -} - -func (enc *Encoding) decodeInPlace(strb []byte) (n int, err error) { - off := 0 - for _, b := range strb { - if b == '\n' || b == '\r' { - continue - } - strb[off] = b - off++ - } - n, _, err = enc.decode(strb, strb[:off]) - return -} - -// DecodeString returns the bytes represented by the base32 string s. -func (enc *Encoding) DecodeString(s string) ([]byte, error) { - strb := []byte(s) - n, err := enc.decodeInPlace(strb) - if err != nil { - return nil, err - } - return strb[:n], nil -} - -type decoder struct { - err error - enc *Encoding - r io.Reader - end bool // saw end of message - buf [1024]byte // leftover input - nbuf int - out []byte // leftover decoded output - outbuf [1024 / 8 * 5]byte -} - -func (d *decoder) Read(p []byte) (n int, err error) { - if d.err != nil { - return 0, d.err - } - - // Use leftover decoded output from last read. - if len(d.out) > 0 { - n = copy(p, d.out) - d.out = d.out[n:] - return n, nil - } - - // Read a chunk. - nn := len(p) / 5 * 8 - if nn < 8 { - nn = 8 - } - if nn > len(d.buf) { - nn = len(d.buf) - } - nn, d.err = io.ReadAtLeast(d.r, d.buf[d.nbuf:nn], 8-d.nbuf) - d.nbuf += nn - if d.nbuf < 8 { - return 0, d.err - } - - // Decode chunk into p, or d.out and then p if p is too small. 
- nr := d.nbuf / 8 * 8 - nw := d.nbuf / 8 * 5 - if nw > len(p) { - nw, d.end, d.err = d.enc.decode(d.outbuf[0:], d.buf[0:nr]) - d.out = d.outbuf[0:nw] - n = copy(p, d.out) - d.out = d.out[n:] - } else { - n, d.end, d.err = d.enc.decode(p, d.buf[0:nr]) - } - d.nbuf -= nr - for i := 0; i < d.nbuf; i++ { - d.buf[i] = d.buf[i+nr] - } - - if d.err == nil { - d.err = err - } - return n, d.err -} - -type newlineFilteringReader struct { - wrapped io.Reader -} - -func (r *newlineFilteringReader) Read(p []byte) (int, error) { - n, err := r.wrapped.Read(p) - for n > 0 { - offset := 0 - for i, b := range p[0:n] { - if b != '\r' && b != '\n' { - if i != offset { - p[offset] = b - } - offset++ - } - } - if offset > 0 { - return offset, err - } - // Previous buffer entirely whitespace, read again - n, err = r.wrapped.Read(p) - } - return n, err -} - -// NewDecoder constructs a new base32 stream decoder. -func NewDecoder(enc *Encoding, r io.Reader) io.Reader { - return &decoder{enc: enc, r: &newlineFilteringReader{r}} -} - -// DecodedLen returns the maximum length in bytes of the decoded data -// corresponding to n bytes of base32-encoded data. -func (enc *Encoding) DecodedLen(n int) int { - if enc.padChar == NoPadding { - return (n*5 + 7) / 8 - } - - return n / 8 * 5 -} diff --git a/vendor/github.com/multiformats/go-base32/package.json b/vendor/github.com/multiformats/go-base32/package.json deleted file mode 100644 index 04a9970..0000000 --- a/vendor/github.com/multiformats/go-base32/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "author": "Golang", - "bugs": { - "url": "https://github.com/multiformats/go-base32" - }, - "gx": { - "dvcsimport": "github.com/multiformats/go-base32" - }, - "gxVersion": "0.7.0", - "language": "go", - "license": "BSD-3", - "name": "base32", - "version": "0.0.3" -} - diff --git a/vendor/github.com/multiformats/go-base36/LICENSE.md b/vendor/github.com/multiformats/go-base36/LICENSE.md deleted file mode 100644 index 7557ca9..0000000 --- a/vendor/github.com/multiformats/go-base36/LICENSE.md +++ /dev/null @@ -1,22 +0,0 @@ -The software contents of this repository are Copyright (c) Protocol Labs, -Licensed under the `Permissive License Stack`, meaning either of: - -- Apache-2.0 Software License: https://www.apache.org/licenses/LICENSE-2.0 - ([...4tr2kfsq](https://gateway.ipfs.io/ipfs/bafkreiankqxazcae4onkp436wag2lj3ccso4nawxqkkfckd6cg4tr2kfsq)) - -- MIT Software License: https://opensource.org/licenses/MIT - ([...vljevcba](https://gateway.ipfs.io/ipfs/bafkreiepofszg4gfe2gzuhojmksgemsub2h4uy2gewdnr35kswvljevcba)) - -You may not use the contents of this repository except in compliance -with one of the listed Licenses. For an extended clarification of the -intent behind the choice of Licensing please refer to -https://protocol.ai/blog/announcing-the-permissive-license-stack/ - -Unless required by applicable law or agreed to in writing, software -distributed under the terms listed in this notice is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -either express or implied. See each License for the specific language -governing permissions and limitations under that License. 
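The go-base32 fork removed above layers case-insensitive decoding and optional padding on top of RFC 4648; a small hedged sketch of that API follows, assuming the import path github.com/multiformats/go-base32 still resolves and using only the RawStdEncoding, HexEncoding, EncodeToString and DecodeString names present in the deleted base32.go.

```go
package main

import (
	"fmt"

	b32 "github.com/multiformats/go-base32"
)

func main() {
	data := []byte("hello multiformats")

	// Unpadded, case-insensitive standard alphabet (what multibase "base32" uses).
	s := b32.RawStdEncoding.EncodeToString(data)
	fmt.Println(s)

	// Decoding accepts either case, because NewEncodingCI fills the decode
	// table for both the upper- and lower-case form of every digit.
	out, err := b32.RawStdEncoding.DecodeString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	// A padded, extended-hex-alphabet variant, as exposed by HexEncoding.
	fmt.Println(b32.HexEncoding.EncodeToString(data))
}
```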
- - -`SPDX-License-Identifier: Apache-2.0 OR MIT` diff --git a/vendor/github.com/multiformats/go-base36/README.md b/vendor/github.com/multiformats/go-base36/README.md deleted file mode 100644 index a92e27c..0000000 --- a/vendor/github.com/multiformats/go-base36/README.md +++ /dev/null @@ -1,22 +0,0 @@ -multiformats/go-base36 -======================= - -> Simple base36 codec - -This is an optimized codec for []byte <=> base36 string conversion - -## Documentation - -https://pkg.go.dev/github.com/multicodec/go-base36 - -## Lead Maintainer - -[Steven Allen](https://github.com/stebalien) - -## Contributing - -Contributions are welcome! This repository is related to the IPFS project and therefore governed by our [contributing guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md). - -## License - -[SPDX-License-Identifier: Apache-2.0 OR MIT](LICENSE.md) diff --git a/vendor/github.com/multiformats/go-base36/base36.go b/vendor/github.com/multiformats/go-base36/base36.go deleted file mode 100644 index e4cb931..0000000 --- a/vendor/github.com/multiformats/go-base36/base36.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - -Package base36 provides a reasonably fast implementation of a binary base36 codec. - -*/ -package base36 - -// Simplified code based on https://godoc.org/github.com/mr-tron/base58 -// which in turn is based on https://github.com/trezor/trezor-crypto/commit/89a7d7797b806fac - -import ( - "fmt" -) - -const UcAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" -const LcAlphabet = "0123456789abcdefghijklmnopqrstuvwxyz" -const maxDigitOrdinal = byte('z') -const maxDigitValueB36 = 35 - -var revAlphabet [maxDigitOrdinal + 1]byte - -func init() { - for i := range revAlphabet { - revAlphabet[i] = maxDigitValueB36 + 1 - } - for i, c := range UcAlphabet { - revAlphabet[byte(c)] = byte(i) - if c > '9' { - revAlphabet[byte(c)+32] = byte(i) - } - } -} - -// EncodeToStringUc encodes the given byte-buffer as base36 using [0-9A-Z] as -// the digit-alphabet -func EncodeToStringUc(b []byte) string { return encode(b, UcAlphabet) } - -// EncodeToStringLc encodes the given byte-buffer as base36 using [0-9a-z] as -// the digit-alphabet -func EncodeToStringLc(b []byte) string { return encode(b, LcAlphabet) } - -func encode(inBuf []byte, al string) string { - - // As a polar opposite to the base58 implementation, using a uint32 here is - // significantly slower - var carry uint64 - - var encIdx, valIdx, zcnt, high int - - inSize := len(inBuf) - for zcnt < inSize && inBuf[zcnt] == 0 { - zcnt++ - } - - // Really this is log(256)/log(36) or 1.55, but integer math is easier - // Use 2 as a constant and just overallocate - encSize := (inSize - zcnt) * 2 - - // Allocate one big buffer up front - // Note: pools *DO NOT* help, the overhead of zeroing the val-half (see below) - // kills any performance gain to be had - outBuf := make([]byte, (zcnt + encSize*2)) - - // use the second half for the temporary numeric buffer - val := outBuf[encSize+zcnt:] - - high = encSize - 1 - for _, b := range inBuf[zcnt:] { - valIdx = encSize - 1 - for carry = uint64(b); valIdx > high || carry != 0; valIdx-- { - carry += uint64((val[valIdx])) * 256 - val[valIdx] = byte(carry % 36) - carry /= 36 - } - high = valIdx - } - - // Reset the value index to the first significant value position - for valIdx = 0; valIdx < encSize && val[valIdx] == 0; valIdx++ { - } - - // Now write the known-length result to first half of buffer - encSize += zcnt - valIdx - - for encIdx = 0; encIdx < zcnt; encIdx++ { - outBuf[encIdx] = '0' - } - - 
for encIdx < encSize { - outBuf[encIdx] = al[val[valIdx]] - encIdx++ - valIdx++ - } - - return string(outBuf[:encSize]) -} - -// DecodeString takes a base36 encoded string and returns a slice of the decoded -// bytes. -func DecodeString(s string) ([]byte, error) { - - if len(s) == 0 { - return nil, fmt.Errorf("can not decode zero-length string") - } - - var zcnt int - - for i := 0; i < len(s) && s[i] == '0'; i++ { - zcnt++ - } - - var t, c uint64 - - outi := make([]uint32, (len(s)+3)/4) - binu := make([]byte, (len(s)+3)*3) - - for _, r := range s { - if r > rune(maxDigitOrdinal) || revAlphabet[r] > maxDigitValueB36 { - return nil, fmt.Errorf("invalid base36 character (%q)", r) - } - - c = uint64(revAlphabet[r]) - - for j := len(outi) - 1; j >= 0; j-- { - t = uint64(outi[j])*36 + c - c = (t >> 32) - outi[j] = uint32(t & 0xFFFFFFFF) - } - - } - - mask := (uint(len(s)%4) * 8) - if mask == 0 { - mask = 32 - } - mask -= 8 - var j, cnt int - for j, cnt = 0, 0; j < len(outi); j++ { - for mask < 32 { // loop relies on uint overflow - binu[cnt] = byte(outi[j] >> mask) - mask -= 8 - cnt++ - } - mask = 24 - } - - for n := zcnt; n < len(binu); n++ { - if binu[n] > 0 { - return binu[n-zcnt : cnt], nil - } - } - return binu[:cnt], nil -} diff --git a/vendor/github.com/multiformats/go-multibase/.codecov.yml b/vendor/github.com/multiformats/go-multibase/.codecov.yml deleted file mode 100644 index db24720..0000000 --- a/vendor/github.com/multiformats/go-multibase/.codecov.yml +++ /dev/null @@ -1 +0,0 @@ -comment: off diff --git a/vendor/github.com/multiformats/go-multibase/.gitignore b/vendor/github.com/multiformats/go-multibase/.gitignore deleted file mode 100644 index 175b291..0000000 --- a/vendor/github.com/multiformats/go-multibase/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.swp - -multibase-conv/multibase-conv diff --git a/vendor/github.com/multiformats/go-multibase/.gitmodules b/vendor/github.com/multiformats/go-multibase/.gitmodules deleted file mode 100644 index 74c037f..0000000 --- a/vendor/github.com/multiformats/go-multibase/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "spec"] - path = spec - url = https://github.com/multiformats/multibase.git diff --git a/vendor/github.com/multiformats/go-multibase/.travis.yml b/vendor/github.com/multiformats/go-multibase/.travis.yml deleted file mode 100644 index 09f9a4c..0000000 --- a/vendor/github.com/multiformats/go-multibase/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -os: - - linux - -language: go - -go: - - 1.11.x - -env: - global: - - GOTFLAGS="-race" - matrix: - - BUILD_DEPTYPE=gomod - - -# disable travis install -install: - - true - -script: - - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) - - -cache: - directories: - - $GOPATH/pkg/mod - - /home/travis/.cache/go-build - -notifications: - email: false diff --git a/vendor/github.com/multiformats/go-multibase/LICENSE b/vendor/github.com/multiformats/go-multibase/LICENSE deleted file mode 100644 index f64ffb0..0000000 --- a/vendor/github.com/multiformats/go-multibase/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Protocol Labs Inc. 
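Similarly, a hedged sketch of the go-base36 codec removed above; it assumes github.com/multiformats/go-base36 as the import path and uses only EncodeToStringLc, EncodeToStringUc and DecodeString from the deleted base36.go.

```go
package main

import (
	"fmt"

	b36 "github.com/multiformats/go-base36"
)

func main() {
	data := []byte{0x01, 0x02, 0x03, 0x04}

	lower := b36.EncodeToStringLc(data) // digits drawn from [0-9a-z]
	upper := b36.EncodeToStringUc(data) // digits drawn from [0-9A-Z]
	fmt.Println(lower, upper)

	// Decoding is case-insensitive: the reverse alphabet maps both cases.
	out, err := b36.DecodeString(upper)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)
}
```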
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/multiformats/go-multibase/Makefile b/vendor/github.com/multiformats/go-multibase/Makefile deleted file mode 100644 index ce9a3a1..0000000 --- a/vendor/github.com/multiformats/go-multibase/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -test: deps - go test -count=1 -race -v ./... - -export IPFS_API ?= v04x.ipfs.io - -deps: - go get -t ./... diff --git a/vendor/github.com/multiformats/go-multibase/README.md b/vendor/github.com/multiformats/go-multibase/README.md deleted file mode 100644 index 87e6f24..0000000 --- a/vendor/github.com/multiformats/go-multibase/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# go-multibase - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) -[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) -[![Travis CI](https://img.shields.io/travis/multiformats/go-multibase.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-multibase) -[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-multibase.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-multibase?branch=master) - -> Implementation of [multibase](https://github.com/multiformats/multibase) -self identifying base encodings- in Go. - - -## Install - -`go-multibase` is a standard Go module which can be installed with: - -```sh -go get github.com/multiformats/go-multibase -``` - -## Contribute - -Contributions welcome. Please check out [the issues](https://github.com/multiformats/go-multibase/issues). - -Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - -Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. - -## License - -[MIT](LICENSE) © 2016 Protocol Labs Inc. 
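Since the README above stops at installation, a short, hedged usage sketch of the go-multibase API may help while reviewing its removal; it assumes the standard import path and sticks to the Encode, Decode and EncoderByName entry points defined in the sources deleted below.

```go
package main

import (
	"fmt"

	"github.com/multiformats/go-multibase"
)

func main() {
	data := []byte("hello multibase")

	// Encode prepends the single-character base prefix ('z' for base58btc).
	s, err := multibase.Encode(multibase.Base58BTC, data)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)

	// Decode inspects the prefix and reports which encoding was used.
	enc, out, err := multibase.Decode(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(enc == multibase.Base58BTC, string(out))

	// EncoderByName accepts either the full name or the one-character prefix.
	e, err := multibase.EncoderByName("base32")
	if err != nil {
		panic(err)
	}
	fmt.Println(e.Encode(data))
}
```

The prefix byte is the whole contract: Decode only needs the first character of the string to select the right codec for the remainder.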
diff --git a/vendor/github.com/multiformats/go-multibase/base16.go b/vendor/github.com/multiformats/go-multibase/base16.go deleted file mode 100644 index 6b87941..0000000 --- a/vendor/github.com/multiformats/go-multibase/base16.go +++ /dev/null @@ -1,21 +0,0 @@ -package multibase - -func hexEncodeToStringUpper(src []byte) string { - dst := make([]byte, len(src)*2) - hexEncodeUpper(dst, src) - return string(dst) -} - -var hexTableUppers = [16]byte{ - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', - 'A', 'B', 'C', 'D', 'E', 'F', -} - -func hexEncodeUpper(dst, src []byte) int { - for i, v := range src { - dst[i*2] = hexTableUppers[v>>4] - dst[i*2+1] = hexTableUppers[v&0x0f] - } - - return len(src) * 2 -} diff --git a/vendor/github.com/multiformats/go-multibase/base2.go b/vendor/github.com/multiformats/go-multibase/base2.go deleted file mode 100644 index 6e3f0cf..0000000 --- a/vendor/github.com/multiformats/go-multibase/base2.go +++ /dev/null @@ -1,52 +0,0 @@ -package multibase - -import ( - "fmt" - "strconv" - "strings" -) - -// binaryEncodeToString takes an array of bytes and returns -// multibase binary representation -func binaryEncodeToString(src []byte) string { - dst := make([]byte, len(src)*8) - encodeBinary(dst, src) - return string(dst) -} - -// encodeBinary takes the src and dst bytes and converts each -// byte to their binary rep using power reduction method -func encodeBinary(dst []byte, src []byte) { - for i, b := range src { - for j := 0; j < 8; j++ { - if b&(1<>3) - - for i, dstIndex := 0, 0; i < len(s); i = i + 8 { - value, err := strconv.ParseInt(s[i:i+8], 2, 0) - if err != nil { - return nil, fmt.Errorf("error while conversion: %s", err) - } - - data[dstIndex] = byte(value) - dstIndex++ - } - - return data, nil -} diff --git a/vendor/github.com/multiformats/go-multibase/base32.go b/vendor/github.com/multiformats/go-multibase/base32.go deleted file mode 100644 index a6fe8eb..0000000 --- a/vendor/github.com/multiformats/go-multibase/base32.go +++ /dev/null @@ -1,17 +0,0 @@ -package multibase - -import ( - b32 "github.com/multiformats/go-base32" -) - -var base32StdLowerPad = b32.NewEncodingCI("abcdefghijklmnopqrstuvwxyz234567") -var base32StdLowerNoPad = base32StdLowerPad.WithPadding(b32.NoPadding) - -var base32StdUpperPad = b32.NewEncodingCI("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567") -var base32StdUpperNoPad = base32StdUpperPad.WithPadding(b32.NoPadding) - -var base32HexLowerPad = b32.NewEncodingCI("0123456789abcdefghijklmnopqrstuv") -var base32HexLowerNoPad = base32HexLowerPad.WithPadding(b32.NoPadding) - -var base32HexUpperPad = b32.NewEncodingCI("0123456789ABCDEFGHIJKLMNOPQRSTUV") -var base32HexUpperNoPad = base32HexUpperPad.WithPadding(b32.NoPadding) diff --git a/vendor/github.com/multiformats/go-multibase/encoder.go b/vendor/github.com/multiformats/go-multibase/encoder.go deleted file mode 100644 index 42e753f..0000000 --- a/vendor/github.com/multiformats/go-multibase/encoder.go +++ /dev/null @@ -1,63 +0,0 @@ -package multibase - -import ( - "fmt" -) - -// Encoder is a multibase encoding that is verified to be supported and -// supports an Encode method that does not return an error -type Encoder struct { - enc Encoding -} - -// NewEncoder create a new Encoder from an Encoding -func NewEncoder(base Encoding) (Encoder, error) { - _, ok := EncodingToStr[base] - if !ok { - return Encoder{-1}, fmt.Errorf("Unsupported multibase encoding: %d", base) - } - return Encoder{base}, nil -} - -// MustNewEncoder is like NewEncoder but will panic if the encoding is -// invalid. 
-func MustNewEncoder(base Encoding) Encoder { - _, ok := EncodingToStr[base] - if !ok { - panic("Unsupported multibase encoding") - } - return Encoder{base} -} - -// EncoderByName creates an encoder from a string, the string can -// either be the multibase name or single character multibase prefix -func EncoderByName(str string) (Encoder, error) { - var base Encoding - ok := true - if len(str) == 0 { - return Encoder{-1}, fmt.Errorf("Empty multibase encoding") - } else if len(str) == 1 { - base = Encoding(str[0]) - _, ok = EncodingToStr[base] - } else { - base, ok = Encodings[str] - } - if !ok { - return Encoder{-1}, fmt.Errorf("Unsupported multibase encoding: %s", str) - } - return Encoder{base}, nil -} - -func (p Encoder) Encoding() Encoding { - return p.enc -} - -// Encode encodes the multibase using the given Encoder. -func (p Encoder) Encode(data []byte) string { - str, err := Encode(p.enc, data) - if err != nil { - // should not happen - panic(err) - } - return str -} diff --git a/vendor/github.com/multiformats/go-multibase/multibase.go b/vendor/github.com/multiformats/go-multibase/multibase.go deleted file mode 100644 index 87b8118..0000000 --- a/vendor/github.com/multiformats/go-multibase/multibase.go +++ /dev/null @@ -1,185 +0,0 @@ -package multibase - -import ( - "encoding/base64" - "encoding/hex" - "fmt" - - b58 "github.com/mr-tron/base58/base58" - b32 "github.com/multiformats/go-base32" - b36 "github.com/multiformats/go-base36" -) - -// Encoding identifies the type of base-encoding that a multibase is carrying. -type Encoding int - -// These are the encodings specified in the standard, not are all -// supported yet -const ( - Identity = 0x00 - Base2 = '0' - Base8 = '7' - Base10 = '9' - Base16 = 'f' - Base16Upper = 'F' - Base32 = 'b' - Base32Upper = 'B' - Base32pad = 'c' - Base32padUpper = 'C' - Base32hex = 'v' - Base32hexUpper = 'V' - Base32hexPad = 't' - Base32hexPadUpper = 'T' - Base36 = 'k' - Base36Upper = 'K' - Base58BTC = 'z' - Base58Flickr = 'Z' - Base64 = 'm' - Base64url = 'u' - Base64pad = 'M' - Base64urlPad = 'U' -) - -// EncodingToStr is a map of the supported encoding, unsupported encoding -// specified in standard are left out -var EncodingToStr = map[Encoding]string{ - 0x00: "identity", - '0': "base2", - 'f': "base16", - 'F': "base16upper", - 'b': "base32", - 'B': "base32upper", - 'c': "base32pad", - 'C': "base32padupper", - 'v': "base32hex", - 'V': "base32hexupper", - 't': "base32hexpad", - 'T': "base32hexpadupper", - 'k': "base36", - 'K': "base36upper", - 'z': "base58btc", - 'Z': "base58flickr", - 'm': "base64", - 'u': "base64url", - 'M': "base64pad", - 'U': "base64urlpad", -} - -var Encodings = map[string]Encoding{} - -func init() { - for e, n := range EncodingToStr { - Encodings[n] = e - } -} - -// ErrUnsupportedEncoding is returned when the selected encoding is not known or -// implemented. -var ErrUnsupportedEncoding = fmt.Errorf("selected encoding not supported") - -// Encode encodes a given byte slice with the selected encoding and returns a -// multibase string (). It will return -// an error if the selected base is not known. -func Encode(base Encoding, data []byte) (string, error) { - switch base { - case Identity: - // 0x00 inside a string is OK in golang and causes no problems with the length calculation. 
- return string(Identity) + string(data), nil - case Base2: - return string(Base2) + binaryEncodeToString(data), nil - case Base16: - return string(Base16) + hex.EncodeToString(data), nil - case Base16Upper: - return string(Base16Upper) + hexEncodeToStringUpper(data), nil - case Base32: - return string(Base32) + base32StdLowerNoPad.EncodeToString(data), nil - case Base32Upper: - return string(Base32Upper) + base32StdUpperNoPad.EncodeToString(data), nil - case Base32hex: - return string(Base32hex) + base32HexLowerNoPad.EncodeToString(data), nil - case Base32hexUpper: - return string(Base32hexUpper) + base32HexUpperNoPad.EncodeToString(data), nil - case Base32pad: - return string(Base32pad) + base32StdLowerPad.EncodeToString(data), nil - case Base32padUpper: - return string(Base32padUpper) + base32StdUpperPad.EncodeToString(data), nil - case Base32hexPad: - return string(Base32hexPad) + base32HexLowerPad.EncodeToString(data), nil - case Base32hexPadUpper: - return string(Base32hexPadUpper) + base32HexUpperPad.EncodeToString(data), nil - case Base36: - return string(Base36) + b36.EncodeToStringLc(data), nil - case Base36Upper: - return string(Base36Upper) + b36.EncodeToStringUc(data), nil - case Base58BTC: - return string(Base58BTC) + b58.EncodeAlphabet(data, b58.BTCAlphabet), nil - case Base58Flickr: - return string(Base58Flickr) + b58.EncodeAlphabet(data, b58.FlickrAlphabet), nil - case Base64pad: - return string(Base64pad) + base64.StdEncoding.EncodeToString(data), nil - case Base64urlPad: - return string(Base64urlPad) + base64.URLEncoding.EncodeToString(data), nil - case Base64url: - return string(Base64url) + base64.RawURLEncoding.EncodeToString(data), nil - case Base64: - return string(Base64) + base64.RawStdEncoding.EncodeToString(data), nil - default: - return "", ErrUnsupportedEncoding - } -} - -// Decode takes a multibase string and decodes into a bytes buffer. -// It will return an error if the selected base is not known. 
-func Decode(data string) (Encoding, []byte, error) { - if len(data) == 0 { - return 0, nil, fmt.Errorf("cannot decode multibase for zero length string") - } - - enc := Encoding(data[0]) - - switch enc { - case Identity: - return Identity, []byte(data[1:]), nil - case Base2: - bytes, err := decodeBinaryString(data[1:]) - return enc, bytes, err - case Base16, Base16Upper: - bytes, err := hex.DecodeString(data[1:]) - return enc, bytes, err - case Base32, Base32Upper: - bytes, err := b32.RawStdEncoding.DecodeString(data[1:]) - return enc, bytes, err - case Base32hex, Base32hexUpper: - bytes, err := b32.RawHexEncoding.DecodeString(data[1:]) - return enc, bytes, err - case Base32pad, Base32padUpper: - bytes, err := b32.StdEncoding.DecodeString(data[1:]) - return enc, bytes, err - case Base32hexPad, Base32hexPadUpper: - bytes, err := b32.HexEncoding.DecodeString(data[1:]) - return enc, bytes, err - case Base36, Base36Upper: - bytes, err := b36.DecodeString(data[1:]) - return enc, bytes, err - case Base58BTC: - bytes, err := b58.DecodeAlphabet(data[1:], b58.BTCAlphabet) - return Base58BTC, bytes, err - case Base58Flickr: - bytes, err := b58.DecodeAlphabet(data[1:], b58.FlickrAlphabet) - return Base58Flickr, bytes, err - case Base64pad: - bytes, err := base64.StdEncoding.DecodeString(data[1:]) - return Base64pad, bytes, err - case Base64urlPad: - bytes, err := base64.URLEncoding.DecodeString(data[1:]) - return Base64urlPad, bytes, err - case Base64: - bytes, err := base64.RawStdEncoding.DecodeString(data[1:]) - return Base64, bytes, err - case Base64url: - bytes, err := base64.RawURLEncoding.DecodeString(data[1:]) - return Base64url, bytes, err - default: - return -1, nil, ErrUnsupportedEncoding - } -} diff --git a/vendor/github.com/multiformats/go-multibase/package.json b/vendor/github.com/multiformats/go-multibase/package.json deleted file mode 100644 index e5b8365..0000000 --- a/vendor/github.com/multiformats/go-multibase/package.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "author": "whyrusleeping", - "bugs": { - "url": "https://github.com/multiformats/go-multibase" - }, - "language": "go", - "license": "", - "name": "go-multibase", - "version": "0.3.0" -} diff --git a/vendor/github.com/multiformats/go-multihash/.gitignore b/vendor/github.com/multiformats/go-multihash/.gitignore deleted file mode 100644 index 1d74e21..0000000 --- a/vendor/github.com/multiformats/go-multihash/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.vscode/ diff --git a/vendor/github.com/multiformats/go-multihash/.gitmodules b/vendor/github.com/multiformats/go-multihash/.gitmodules deleted file mode 100644 index d92ce4f..0000000 --- a/vendor/github.com/multiformats/go-multihash/.gitmodules +++ /dev/null @@ -1,6 +0,0 @@ -[submodule "spec/multicodec"] - path = spec/multicodec - url = https://github.com/multiformats/multicodec.git -[submodule "spec/multihash"] - path = spec/multihash - url = https://github.com/multiformats/multihash.git diff --git a/vendor/github.com/multiformats/go-multihash/.travis.yml b/vendor/github.com/multiformats/go-multihash/.travis.yml deleted file mode 100644 index 02f3c37..0000000 --- a/vendor/github.com/multiformats/go-multihash/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -os: - - linux - -language: go - -go: - - 1.15.x - -env: - global: - - GOTFLAGS="-race" - matrix: - - BUILD_DEPTYPE=gomod - - -# disable travis install -install: - - true - -script: - - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) - - -cache: - directories: - - $GOPATH/pkg/mod - - 
/home/travis/.cache/go-build - -notifications: - email: false diff --git a/vendor/github.com/multiformats/go-multihash/LICENSE b/vendor/github.com/multiformats/go-multihash/LICENSE deleted file mode 100644 index c7386b3..0000000 --- a/vendor/github.com/multiformats/go-multihash/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Juan Batiz-Benet - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/multiformats/go-multihash/Makefile b/vendor/github.com/multiformats/go-multihash/Makefile deleted file mode 100644 index 2061941..0000000 --- a/vendor/github.com/multiformats/go-multihash/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -gx: - go get github.com/whyrusleeping/gx - go get github.com/whyrusleeping/gx-go - -deps: gx - gx --verbose install --global - gx-go rewrite - -publish: - gx-go rewrite --undo - diff --git a/vendor/github.com/multiformats/go-multihash/README.md b/vendor/github.com/multiformats/go-multihash/README.md deleted file mode 100644 index dd7f238..0000000 --- a/vendor/github.com/multiformats/go-multihash/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# go-multihash - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) -[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) -[![GoDoc](https://godoc.org/github.com/multiformats/go-multihash?status.svg)](https://godoc.org/github.com/multiformats/go-multihash) -[![Travis CI](https://img.shields.io/travis/multiformats/go-multihash.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-multihash) -[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-multihash.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-multihash?branch=master) - -> [multihash](https://github.com/multiformats/multihash) implementation in Go - -## Table of Contents - -- [Install](#install) -- [Usage](#usage) -- [Maintainers](#maintainers) -- [Contribute](#contribute) -- [License](#license) - -## Install - -`go-multihash` is a standard Go module which can be installed with: - -```sh -go get github.com/multiformats/go-multihash -``` - -## Usage - - -### Example - -This example takes 
a standard hex-encoded data and uses `EncodeName` to calculate the SHA1 multihash value for the buffer. - -The resulting hex-encoded data corresponds to: ``, which could be re-parsed -with `Multihash.FromHexString()`. - - -```go -package main - -import ( - "encoding/hex" - "fmt" - - "github.com/multiformats/go-multihash" -) - -func main() { - // ignores errors for simplicity. - // don't do that at home. - // Decode a SHA1 hash to a binary buffer - buf, _ := hex.DecodeString("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") - - // Create a new multihash with it. - mHashBuf, _ := multihash.EncodeName(buf, "sha1") - // Print the multihash as hex string - fmt.Printf("hex: %s\n", hex.EncodeToString(mHashBuf)) - - // Parse the binary multihash to a DecodedMultihash - mHash, _ := multihash.Decode(mHashBuf) - // Convert the sha1 value to hex string - sha1hex := hex.EncodeToString(mHash.Digest) - // Print all the information in the multihash - fmt.Printf("obj: %v 0x%x %d %s\n", mHash.Name, mHash.Code, mHash.Length, sha1hex) -} -``` - -To run, copy to [example/foo.go](example/foo.go) and: - -``` -> cd example/ -> go build -> ./example -hex: 11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 -obj: sha1 0x11 20 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 -``` - -## Contribute - -Contributions welcome. Please check out [the issues](https://github.com/multiformats/go-multihash/issues). - -Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - -Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. - -## License - -[MIT](LICENSE) © 2014 Juan Batiz-Benet diff --git a/vendor/github.com/multiformats/go-multihash/codecov.yml b/vendor/github.com/multiformats/go-multihash/codecov.yml deleted file mode 100644 index 5f88a9e..0000000 --- a/vendor/github.com/multiformats/go-multihash/codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -coverage: - range: "50...100" -comment: off diff --git a/vendor/github.com/multiformats/go-multihash/core/errata.go b/vendor/github.com/multiformats/go-multihash/core/errata.go deleted file mode 100644 index 8a91b66..0000000 --- a/vendor/github.com/multiformats/go-multihash/core/errata.go +++ /dev/null @@ -1,34 +0,0 @@ -package multihash - -import ( - "bytes" - "crypto/sha256" - "hash" -) - -type identityMultihash struct { - bytes.Buffer -} - -func (identityMultihash) BlockSize() int { - return 32 // A prefered block size is nonsense for the "identity" "hash". An arbitrary but unsurprising and positive nonzero number has been chosen to minimize the odds of fascinating bugs. 
-} - -func (x *identityMultihash) Size() int { - return x.Len() -} - -func (x *identityMultihash) Sum(digest []byte) []byte { - return x.Bytes() -} - -type doubleSha256 struct { - hash.Hash -} - -func (x doubleSha256) Sum(digest []byte) []byte { - digest = x.Hash.Sum(digest) - h2 := sha256.New() - h2.Write(digest) - return h2.Sum(digest[0:0]) -} diff --git a/vendor/github.com/multiformats/go-multihash/core/magic.go b/vendor/github.com/multiformats/go-multihash/core/magic.go deleted file mode 100644 index 78ae8c1..0000000 --- a/vendor/github.com/multiformats/go-multihash/core/magic.go +++ /dev/null @@ -1,26 +0,0 @@ -package multihash - -import "errors" - -// ErrSumNotSupported is returned when the Sum function code is not implemented -var ErrSumNotSupported = errors.New("no such hash registered") - -// constants -const ( - IDENTITY = 0x00 - SHA1 = 0x11 - SHA2_256 = 0x12 - SHA2_512 = 0x13 - SHA3_224 = 0x17 - SHA3_256 = 0x16 - SHA3_384 = 0x15 - SHA3_512 = 0x14 - KECCAK_224 = 0x1A - KECCAK_256 = 0x1B - KECCAK_384 = 0x1C - KECCAK_512 = 0x1D - SHAKE_128 = 0x18 - SHAKE_256 = 0x19 - MD5 = 0xd5 - DBL_SHA2_256 = 0x56 -) diff --git a/vendor/github.com/multiformats/go-multihash/core/registry.go b/vendor/github.com/multiformats/go-multihash/core/registry.go deleted file mode 100644 index 64e8af3..0000000 --- a/vendor/github.com/multiformats/go-multihash/core/registry.go +++ /dev/null @@ -1,77 +0,0 @@ -package multihash - -import ( - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "fmt" - "hash" -) - -// registry is a simple map which maps a multihash indicator number -// to a standard golang Hash interface. -// -// Multihash indicator numbers are reserved and described in -// https://github.com/multiformats/multicodec/blob/master/table.csv . -// The keys used in this map must match those reservations. -// -// Hashers which are available in the golang stdlib will be registered automatically. -// Others can be added using the Register function. -var registry = make(map[uint64]func() hash.Hash) - -// Register adds a new hash to the set available from GetHasher and Sum. -// -// Register has a global effect and should only be used at package init time to avoid data races. -// -// The indicator code should be per the numbers reserved and described in -// https://github.com/multiformats/multicodec/blob/master/table.csv . -// -// If Register is called with the same indicator code more than once, the last call wins. -// In practice, this means that if an application has a strong opinion about what implementation to use for a certain hash -// (e.g., perhaps they want to override the sha256 implementation to use a special hand-rolled assembly variant -// rather than the stdlib one which is registered by default), -// then this can be done by making a Register call with that effect at init time in the application's main package. -// This should have the desired effect because the root of the import tree has its init time effect last. -func Register(indicator uint64, hasherFactory func() hash.Hash) { - if hasherFactory == nil { - panic("not sensible to attempt to register a nil function") - } - registry[indicator] = hasherFactory - DefaultLengths[indicator] = hasherFactory().Size() -} - -// GetHasher returns a new hash.Hash according to the indicator code number provided. -// -// The indicator code should be per the numbers reserved and described in -// https://github.com/multiformats/multicodec/blob/master/table.csv . 
-// -// The actual hashers available are determined by what has been registered. -// The registry automatically contains those hashers which are available in the golang standard libraries -// (which includes md5, sha1, sha256, sha384, sha512, and the "identity" mulithash, among others). -// Other hash implementations can be made available by using the Register function. -// The 'go-mulithash/register/*' packages can also be imported to gain more common hash functions. -// -// If an error is returned, it will match `errors.Is(err, ErrSumNotSupported)`. -func GetHasher(indicator uint64) (hash.Hash, error) { - factory, exists := registry[indicator] - if !exists { - return nil, fmt.Errorf("unknown multihash code %d (0x%x): %w", indicator, indicator, ErrSumNotSupported) - } - return factory(), nil -} - -// DefaultLengths maps a multihash indicator code to the output size for that hash, in units of bytes. -// -// This map is populated when a hash function is registered by the Register function. -// It's effectively a shortcut for asking Size() on the hash.Hash. -var DefaultLengths = map[uint64]int{} - -func init() { - Register(IDENTITY, func() hash.Hash { return &identityMultihash{} }) - Register(MD5, md5.New) - Register(SHA1, sha1.New) - Register(SHA2_256, sha256.New) - Register(SHA2_512, sha512.New) - Register(DBL_SHA2_256, func() hash.Hash { return &doubleSha256{sha256.New()} }) -} diff --git a/vendor/github.com/multiformats/go-multihash/io.go b/vendor/github.com/multiformats/go-multihash/io.go deleted file mode 100644 index 3a31baa..0000000 --- a/vendor/github.com/multiformats/go-multihash/io.go +++ /dev/null @@ -1,98 +0,0 @@ -package multihash - -import ( - "errors" - "io" - "math" - - "github.com/multiformats/go-varint" -) - -// Reader is an io.Reader wrapper that exposes a function -// to read a whole multihash, parse it, and return it. -type Reader interface { - io.Reader - - ReadMultihash() (Multihash, error) -} - -// Writer is an io.Writer wrapper that exposes a function -// to write a whole multihash. 
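The registry above is what hashing in this package consults at runtime; the following hedged sketch exercises it directly, assuming the core package lives at github.com/multiformats/go-multihash/core and exposes the GetHasher, DefaultLengths and indicator constants shown in the deleted files.

```go
package main

import (
	"fmt"

	mhreg "github.com/multiformats/go-multihash/core"
)

func main() {
	// Look up the hasher registered for the sha2-256 indicator (0x12).
	h, err := mhreg.GetHasher(mhreg.SHA2_256)
	if err != nil {
		panic(err) // unregistered codes return an error wrapping ErrSumNotSupported
	}
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))

	// DefaultLengths records the digest size captured at registration time.
	fmt.Println(mhreg.DefaultLengths[mhreg.SHA2_256]) // 32 bytes for sha2-256
}
```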
-type Writer interface { - io.Writer - - WriteMultihash(Multihash) error -} - -// NewReader wraps an io.Reader with a multihash.Reader -func NewReader(r io.Reader) Reader { - return &mhReader{r} -} - -// NewWriter wraps an io.Writer with a multihash.Writer -func NewWriter(w io.Writer) Writer { - return &mhWriter{w} -} - -type mhReader struct { - r io.Reader -} - -func (r *mhReader) Read(buf []byte) (n int, err error) { - return r.r.Read(buf) -} - -func (r *mhReader) ReadByte() (byte, error) { - if br, ok := r.r.(io.ByteReader); ok { - return br.ReadByte() - } - var b [1]byte - n, err := r.r.Read(b[:]) - if n == 1 { - return b[0], nil - } - if err == nil { - if n != 0 { - panic("reader returned an invalid length") - } - err = io.ErrNoProgress - } - return 0, err -} - -func (r *mhReader) ReadMultihash() (Multihash, error) { - code, err := varint.ReadUvarint(r) - if err != nil { - return nil, err - } - - length, err := varint.ReadUvarint(r) - if err != nil { - return nil, err - } - if length > math.MaxInt32 { - return nil, errors.New("digest too long, supporting only <= 2^31-1") - } - - buf := make([]byte, varint.UvarintSize(code)+varint.UvarintSize(length)+int(length)) - n := varint.PutUvarint(buf, code) - n += varint.PutUvarint(buf[n:], length) - if _, err := io.ReadFull(r.r, buf[n:]); err != nil { - return nil, err - } - - return Cast(buf) -} - -type mhWriter struct { - w io.Writer -} - -func (w *mhWriter) Write(buf []byte) (n int, err error) { - return w.w.Write(buf) -} - -func (w *mhWriter) WriteMultihash(m Multihash) error { - _, err := w.w.Write([]byte(m)) - return err -} diff --git a/vendor/github.com/multiformats/go-multihash/multihash.go b/vendor/github.com/multiformats/go-multihash/multihash.go deleted file mode 100644 index 9eeb9ca..0000000 --- a/vendor/github.com/multiformats/go-multihash/multihash.go +++ /dev/null @@ -1,302 +0,0 @@ -// Package multihash is the Go implementation of -// https://github.com/multiformats/multihash, or self-describing -// hashes. -package multihash - -import ( - "encoding/hex" - "errors" - "fmt" - "math" - - b58 "github.com/mr-tron/base58/base58" - "github.com/multiformats/go-varint" -) - -// errors -var ( - ErrUnknownCode = errors.New("unknown multihash code") - ErrTooShort = errors.New("multihash too short. must be >= 2 bytes") - ErrTooLong = errors.New("multihash too long. 
must be < 129 bytes") - ErrLenNotSupported = errors.New("multihash does not yet support digests longer than 127 bytes") - ErrInvalidMultihash = errors.New("input isn't valid multihash") - - ErrVarintBufferShort = errors.New("uvarint: buffer too small") - ErrVarintTooLong = errors.New("uvarint: varint too big (max 64bit)") -) - -// ErrInconsistentLen is returned when a decoded multihash has an inconsistent length -type ErrInconsistentLen struct { - dm *DecodedMultihash -} - -func (e ErrInconsistentLen) Error() string { - return fmt.Sprintf("multihash length inconsistent: expected %d, got %d", e.dm.Length, len(e.dm.Digest)) -} - -// constants -const ( - IDENTITY = 0x00 - // Deprecated: use IDENTITY - ID = IDENTITY - SHA1 = 0x11 - SHA2_256 = 0x12 - SHA2_512 = 0x13 - SHA3_224 = 0x17 - SHA3_256 = 0x16 - SHA3_384 = 0x15 - SHA3_512 = 0x14 - SHA3 = SHA3_512 - KECCAK_224 = 0x1A - KECCAK_256 = 0x1B - KECCAK_384 = 0x1C - KECCAK_512 = 0x1D - - SHAKE_128 = 0x18 - SHAKE_256 = 0x19 - - BLAKE2B_MIN = 0xb201 - BLAKE2B_MAX = 0xb240 - BLAKE2S_MIN = 0xb241 - BLAKE2S_MAX = 0xb260 - - MD5 = 0xd5 - - DBL_SHA2_256 = 0x56 - - MURMUR3_128 = 0x22 - // Deprecated: use MURMUR3_128 - MURMUR3 = MURMUR3_128 - - SHA2_256_TRUNC254_PADDED = 0x1012 - X11 = 0x1100 - POSEIDON_BLS12_381_A1_FC1 = 0xb401 -) - -func init() { - // Add blake2b (64 codes) - for c := uint64(BLAKE2B_MIN); c <= BLAKE2B_MAX; c++ { - n := c - BLAKE2B_MIN + 1 - name := fmt.Sprintf("blake2b-%d", n*8) - Names[name] = c - Codes[c] = name - } - - // Add blake2s (32 codes) - for c := uint64(BLAKE2S_MIN); c <= BLAKE2S_MAX; c++ { - n := c - BLAKE2S_MIN + 1 - name := fmt.Sprintf("blake2s-%d", n*8) - Names[name] = c - Codes[c] = name - } -} - -// Names maps the name of a hash to the code -var Names = map[string]uint64{ - "identity": IDENTITY, - "sha1": SHA1, - "sha2-256": SHA2_256, - "sha2-512": SHA2_512, - "sha3": SHA3_512, - "sha3-224": SHA3_224, - "sha3-256": SHA3_256, - "sha3-384": SHA3_384, - "sha3-512": SHA3_512, - "dbl-sha2-256": DBL_SHA2_256, - "murmur3-128": MURMUR3_128, - "keccak-224": KECCAK_224, - "keccak-256": KECCAK_256, - "keccak-384": KECCAK_384, - "keccak-512": KECCAK_512, - "shake-128": SHAKE_128, - "shake-256": SHAKE_256, - "sha2-256-trunc254-padded": SHA2_256_TRUNC254_PADDED, - "x11": X11, - "md5": MD5, - "poseidon-bls12_381-a2-fc1": POSEIDON_BLS12_381_A1_FC1, -} - -// Codes maps a hash code to it's name -var Codes = map[uint64]string{ - IDENTITY: "identity", - SHA1: "sha1", - SHA2_256: "sha2-256", - SHA2_512: "sha2-512", - SHA3_224: "sha3-224", - SHA3_256: "sha3-256", - SHA3_384: "sha3-384", - SHA3_512: "sha3-512", - DBL_SHA2_256: "dbl-sha2-256", - MURMUR3_128: "murmur3-128", - KECCAK_224: "keccak-224", - KECCAK_256: "keccak-256", - KECCAK_384: "keccak-384", - KECCAK_512: "keccak-512", - SHAKE_128: "shake-128", - SHAKE_256: "shake-256", - SHA2_256_TRUNC254_PADDED: "sha2-256-trunc254-padded", - X11: "x11", - POSEIDON_BLS12_381_A1_FC1: "poseidon-bls12_381-a2-fc1", - MD5: "md5", -} - -func uvarint(buf []byte) (uint64, []byte, error) { - n, c, err := varint.FromUvarint(buf) - if err != nil { - return n, buf, err - } - - if c == 0 { - return n, buf, ErrVarintBufferShort - } else if c < 0 { - return n, buf[-c:], ErrVarintTooLong - } else { - return n, buf[c:], nil - } -} - -// DecodedMultihash represents a parsed multihash and allows -// easy access to the different parts of a multihash. 
-type DecodedMultihash struct { - Code uint64 - Name string - Length int // Length is just an int, as that is the type of the len() operator - Digest []byte // Digest holds the raw digest bytes -} - -// Multihash is a byte slice with the following form: -// <hash function code><digest size><hash function output>. -// See the spec for more information. -type Multihash []byte - -// HexString returns the hex-encoded representation of a multihash. -func (m *Multihash) HexString() string { - return hex.EncodeToString([]byte(*m)) -} - -// String is an alias to HexString(). -func (m *Multihash) String() string { - return m.HexString() -} - -// FromHexString parses a hex-encoded multihash. -func FromHexString(s string) (Multihash, error) { - b, err := hex.DecodeString(s) - if err != nil { - return Multihash{}, err - } - - return Cast(b) -} - -// B58String returns the B58-encoded representation of a multihash. -func (m Multihash) B58String() string { - return b58.Encode([]byte(m)) -} - -// FromB58String parses a B58-encoded multihash. -func FromB58String(s string) (m Multihash, err error) { - b, err := b58.Decode(s) - if err != nil { - return Multihash{}, ErrInvalidMultihash - } - - return Cast(b) -} - -// Cast casts a buffer onto a multihash, and returns an error -// if it does not work. -func Cast(buf []byte) (Multihash, error) { - _, err := Decode(buf) - if err != nil { - return Multihash{}, err - } - - return Multihash(buf), nil -} - -// Decode parses multihash bytes into a DecodedMultihash. -func Decode(buf []byte) (*DecodedMultihash, error) { - rlen, code, hdig, err := readMultihashFromBuf(buf) - if err != nil { - return nil, err - } - - dm := &DecodedMultihash{ - Code: code, - Name: Codes[code], - Length: len(hdig), - Digest: hdig, - } - - if len(buf) != rlen { - return nil, ErrInconsistentLen{dm} - } - - return dm, nil -} - -// Encode a hash digest along with the specified function code. -// Note: the length is derived from the length of the digest itself. -// -// The error return is legacy; it is always nil. -func Encode(buf []byte, code uint64) ([]byte, error) { - // FUTURE: this function always causes heap allocs... but when used, this value is almost always going to be appended to another buffer (either as part of CID creation, or etc) -- should this whole function be rethought and alternatives offered? - newBuf := make([]byte, varint.UvarintSize(code)+varint.UvarintSize(uint64(len(buf)))+len(buf)) - n := varint.PutUvarint(newBuf, code) - n += varint.PutUvarint(newBuf[n:], uint64(len(buf))) - - copy(newBuf[n:], buf) - return newBuf, nil -} - -// EncodeName is like Encode() but providing a string name -// instead of a numeric code. See Names for allowed values. -func EncodeName(buf []byte, name string) ([]byte, error) { - return Encode(buf, Names[name]) -} - -// readMultihashFromBuf reads a multihash from the given buffer, returning the -// individual pieces of the multihash.
-// Note: the returned digest is a slice over the passed in data and should be -// copied if the buffer will be reused -func readMultihashFromBuf(buf []byte) (int, uint64, []byte, error) { - bufl := len(buf) - if bufl < 2 { - return 0, 0, nil, ErrTooShort - } - - var err error - var code, length uint64 - - code, buf, err = uvarint(buf) - if err != nil { - return 0, 0, nil, err - } - - length, buf, err = uvarint(buf) - if err != nil { - return 0, 0, nil, err - } - - if length > math.MaxInt32 { - return 0, 0, nil, errors.New("digest too long, supporting only <= 2^31-1") - } - if int(length) > len(buf) { - return 0, 0, nil, errors.New("length greater than remaining number of bytes in buffer") - } - - rlen := (bufl - len(buf)) + int(length) - return rlen, code, buf[:length], nil -} - -// MHFromBytes reads a multihash from the given byte buffer, returning the -// number of bytes read as well as the multihash -func MHFromBytes(buf []byte) (int, Multihash, error) { - nr, _, _, err := readMultihashFromBuf(buf) - if err != nil { - return 0, nil, err - } - - return nr, Multihash(buf[:nr]), nil -} diff --git a/vendor/github.com/multiformats/go-multihash/register/all/multihash_all.go b/vendor/github.com/multiformats/go-multihash/register/all/multihash_all.go deleted file mode 100644 index 711c4d7..0000000 --- a/vendor/github.com/multiformats/go-multihash/register/all/multihash_all.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - This package has no purpose except to perform registration of mulithashes. - - It is meant to be used as a side-effecting import, e.g. - - import ( - _ "github.com/multiformats/go-multihash/register/all" - ) - - This package registers many multihashes at once. - Importing it will increase the size of your dependency tree significantly. - It's recommended that you import this package if you're building some - kind of data broker application, which may need to handle many different kinds of hashes; - if you're building an application which you know only handles a specific hash, - importing this package may bloat your builds unnecessarily. -*/ -package all - -import ( - _ "github.com/multiformats/go-multihash/register/blake2" - _ "github.com/multiformats/go-multihash/register/sha3" -) diff --git a/vendor/github.com/multiformats/go-multihash/register/blake2/multihash_blake2.go b/vendor/github.com/multiformats/go-multihash/register/blake2/multihash_blake2.go deleted file mode 100644 index de8f51c..0000000 --- a/vendor/github.com/multiformats/go-multihash/register/blake2/multihash_blake2.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - This package has no purpose except to perform registration of multihashes. - - It is meant to be used as a side-effecting import, e.g. - - import ( - _ "github.com/multiformats/go-multihash/register/blake2" - ) - - This package registers several multihashes for the blake2 family - (both the 's' and the 'b' variants, and in a variety of sizes). -*/ -package blake2 - -import ( - "hash" - - "github.com/minio/blake2b-simd" - "golang.org/x/crypto/blake2s" - - "github.com/multiformats/go-multihash/core" -) - -const ( - blake2b_min = 0xb201 - blake2b_max = 0xb240 - blake2s_min = 0xb241 - blake2s_max = 0xb260 -) - -func init() { - // blake2s - // This package only enables support for 32byte (256 bit) blake2s. - multihash.Register(blake2s_min+31, func() hash.Hash { h, _ := blake2s.New256(nil); return h }) - - // blake2b - // There's a whole range of these. 
- for c := uint64(blake2b_min); c <= blake2b_max; c++ { - size := int(c - blake2b_min + 1) - - // special case these lengths to avoid allocations. - switch size { - case 32: - multihash.Register(c, blake2b.New256) - continue - case 64: - multihash.Register(c, blake2b.New512) - continue - } - - // Ok, allocate away. - // (The config object here being a pointer is a tad unfortunate, - // but we manage amortize it away by making them just once anyway.) - cfg := &blake2b.Config{Size: uint8(size)} - multihash.Register(c, func() hash.Hash { - hasher, err := blake2b.New(cfg) - if err != nil { - panic(err) - } - return hasher - }) - } -} diff --git a/vendor/github.com/multiformats/go-multihash/register/miniosha256/multihash_miniosha256.go b/vendor/github.com/multiformats/go-multihash/register/miniosha256/multihash_miniosha256.go deleted file mode 100644 index 66eccd5..0000000 --- a/vendor/github.com/multiformats/go-multihash/register/miniosha256/multihash_miniosha256.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - This package has no purpose except to perform registration of multihashes. - - It is meant to be used as a side-effecting import, e.g. - - import ( - _ "github.com/multiformats/go-multihash/register/miniosha256" - ) - - This package registers alternative implementations for sha2-256, using - the github.com/minio/sha256-simd library. -*/ -package miniosha256 - -import ( - "github.com/minio/sha256-simd" - - "github.com/multiformats/go-multihash/core" -) - -func init() { - multihash.Register(multihash.SHA2_256, sha256.New) -} diff --git a/vendor/github.com/multiformats/go-multihash/register/sha3/multihash_sha3.go b/vendor/github.com/multiformats/go-multihash/register/sha3/multihash_sha3.go deleted file mode 100644 index db70b2b..0000000 --- a/vendor/github.com/multiformats/go-multihash/register/sha3/multihash_sha3.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - This package has no purpose except to perform registration of multihashes. - - It is meant to be used as a side-effecting import, e.g. - - import ( - _ "github.com/multiformats/go-multihash/register/sha3" - ) - - This package registers several multihashes for the sha3 family. - This also includes some functions known as "shake" and "keccak", - since they share much of their implementation and come in the same repos. -*/ -package sha3 - -import ( - "hash" - - "golang.org/x/crypto/sha3" - - "github.com/multiformats/go-multihash/core" -) - -func init() { - multihash.Register(multihash.SHA3_512, sha3.New512) - multihash.Register(multihash.SHA3_384, sha3.New384) - multihash.Register(multihash.SHA3_256, sha3.New256) - multihash.Register(multihash.SHA3_224, sha3.New224) - multihash.Register(multihash.SHAKE_128, func() hash.Hash { return shakeNormalizer{sha3.NewShake128(), 128 / 8 * 2} }) - multihash.Register(multihash.SHAKE_256, func() hash.Hash { return shakeNormalizer{sha3.NewShake256(), 256 / 8 * 2} }) - multihash.Register(multihash.KECCAK_256, sha3.NewLegacyKeccak256) - multihash.Register(multihash.KECCAK_512, sha3.NewLegacyKeccak512) -} - -// sha3.ShakeHash presents a somewhat odd interface, and requires a wrapper to normalize it to the usual hash.Hash interface. 
-// -// Some of the fiddly bits required by this normalization probably make it undesirable for use in the highest performance applications; -// There's at least one extra allocation in constructing it (sha3.ShakeHash is an interface, so that's one heap escape; and there's a second heap escape when this normalizer struct gets boxed into a hash.Hash interface), -// and there's at least one extra allocation in getting a sum out of it (because reading a shake hash is a mutation (!) and the API only provides cloning as a way to escape this). -// Fun. -type shakeNormalizer struct { - sha3.ShakeHash - size int -} - -func (shakeNormalizer) BlockSize() int { - return 32 // Shake doesn't have a preferred block size, apparently. An arbitrary but unsurprising and positive nonzero number has been chosen to minimize the odds of fascinating bugs. -} - -func (x shakeNormalizer) Size() int { - return x.size -} - -func (x shakeNormalizer) Sum(digest []byte) []byte { - if len(digest) < x.size { - digest = make([]byte, x.size) - } - digest = digest[0:x.size] - h2 := x.Clone() // clone it, because reading mutates this kind of hash (!) which is not the standard contract for a Hash.Sum method. - h2.Read(digest) // not capable of underreading. See sha3.ShakeSum256 for similar usage. - return digest -} diff --git a/vendor/github.com/multiformats/go-multihash/registry.go b/vendor/github.com/multiformats/go-multihash/registry.go deleted file mode 100644 index 1ca1790..0000000 --- a/vendor/github.com/multiformats/go-multihash/registry.go +++ /dev/null @@ -1,31 +0,0 @@ -package multihash - -import ( - "hash" - - mhreg "github.com/multiformats/go-multihash/core" - - _ "github.com/multiformats/go-multihash/register/all" - _ "github.com/multiformats/go-multihash/register/miniosha256" -) - -// Register is an alias for Register in the core package. -// -// Consider using the core package instead of this multihash package; -// that package does not introduce transitive dependencies except for those you opt into, -// and can result in smaller application builds. -func Register(indicator uint64, hasherFactory func() hash.Hash) { - mhreg.Register(indicator, hasherFactory) -} - -// GetHasher is an alias for GetHasher in the core package. -// -// Consider using the core package instead of this multihash package; -// that package does not introduce transitive dependencies except for those you opt into, -// and can result in smaller application builds. -func GetHasher(indicator uint64) (hash.Hash, error) { - return mhreg.GetHasher(indicator) -} - -// DefaultLengths maps a multihash indicator code to the output size for that hash, in units of bytes. -var DefaultLengths = mhreg.DefaultLengths diff --git a/vendor/github.com/multiformats/go-multihash/set.go b/vendor/github.com/multiformats/go-multihash/set.go deleted file mode 100644 index f56a275..0000000 --- a/vendor/github.com/multiformats/go-multihash/set.go +++ /dev/null @@ -1,66 +0,0 @@ -package multihash - -// Set is a set of Multihashes, holding one copy per Multihash. -type Set struct { - set map[string]struct{} -} - -// NewSet creates a new set correctly initialized. -func NewSet() *Set { - return &Set{ - set: make(map[string]struct{}), - } -} - -// Add adds a new multihash to the set. -func (s *Set) Add(m Multihash) { - s.set[string(m)] = struct{}{} -} - -// Len returns the number of elements in the set. -func (s *Set) Len() int { - return len(s.set) -} - -// Has returns true if the element is in the set.
-func (s *Set) Has(m Multihash) bool { - _, ok := s.set[string(m)] - return ok -} - -// Visit adds a multihash only if it is not in the set already. Returns true -// if the multihash was added (was not in the set before). -func (s *Set) Visit(m Multihash) bool { - _, ok := s.set[string(m)] - if !ok { - s.set[string(m)] = struct{}{} - return true - } - return false -} - -// ForEach runs f(m) with each multihash in the set. It returns immediately if -// f(m) returns an error. -func (s *Set) ForEach(f func(m Multihash) error) error { - for elem := range s.set { - mh := Multihash(elem) - if err := f(mh); err != nil { - return err - } - } - return nil -} - -// Remove removes an element from the set. -func (s *Set) Remove(m Multihash) { - delete(s.set, string(m)) -} - -// All returns a slice with all the elements in the set. -func (s *Set) All() []Multihash { - out := make([]Multihash, 0, len(s.set)) - for m := range s.set { - out = append(out, Multihash(m)) - } - return out -} diff --git a/vendor/github.com/multiformats/go-multihash/sum.go b/vendor/github.com/multiformats/go-multihash/sum.go deleted file mode 100644 index 6d01fe6..0000000 --- a/vendor/github.com/multiformats/go-multihash/sum.go +++ /dev/null @@ -1,52 +0,0 @@ -package multihash - -import ( - "errors" - "fmt" - - mhreg "github.com/multiformats/go-multihash/core" -) - -// ErrSumNotSupported is returned when the Sum function code is not implemented -var ErrSumNotSupported = mhreg.ErrSumNotSupported - -var ErrLenTooLarge = errors.New("requested length was too large for digest") - -// Sum obtains the cryptographic sum of a given buffer. The length parameter -// indicates the length of the resulting digest; passing a negative value -// uses the default length for the selected hash function. -func Sum(data []byte, code uint64, length int) (Multihash, error) { - // Get the algorithm. - hasher, err := GetHasher(code) - if err != nil { - return nil, err - } - - // Feed data in. - hasher.Write(data) - - // Compute final hash. - // A new slice is allocated. FUTURE: see other comment below about allocation, and review together with this line to try to improve. - sum := hasher.Sum(nil) - - // Deal with any truncation. - // Unless it's an identity multihash. Those have different rules. - if length < 0 { - length = hasher.Size() - } - if len(sum) < length { - return nil, ErrLenTooLarge - } - if length >= 0 { - if code == IDENTITY { - if length != len(sum) { - return nil, fmt.Errorf("the length of the identity hash (%d) must be equal to the length of the data (%d)", length, len(sum)) - } - } - sum = sum[:length] - } - - // Put the multihash metainfo bytes at the front of the buffer. - // FUTURE: try to improve allocations here. Encode does several which are probably avoidable, but it's the shape of the Encode method arguments that forces this.
- return Encode(sum, code) -} diff --git a/vendor/github.com/multiformats/go-varint/.travis.yml b/vendor/github.com/multiformats/go-varint/.travis.yml deleted file mode 100644 index 248d09b..0000000 --- a/vendor/github.com/multiformats/go-varint/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -os: - - linux - -language: go - -go: - - 1.11.x - -env: - global: - - GOTFLAGS="-race" - - GO111MODULE=on - matrix: - - BUILD_DEPTYPE=gomod - - -# disable travis install -install: - - true - -script: - - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) - -cache: - directories: - - $GOPATH/pkg/mod - - /home/travis/.cache/go-build - -notifications: - email: false diff --git a/vendor/github.com/multiformats/go-varint/LICENSE b/vendor/github.com/multiformats/go-varint/LICENSE deleted file mode 100644 index 14121ca..0000000 --- a/vendor/github.com/multiformats/go-varint/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 Protocol Labs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/multiformats/go-varint/README.md b/vendor/github.com/multiformats/go-varint/README.md deleted file mode 100644 index 57f0a4a..0000000 --- a/vendor/github.com/multiformats/go-varint/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# go-varint - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) -[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) -[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) -[![GoDoc](https://godoc.org/github.com/multiformats/go-varint?status.svg)](https://godoc.org/github.com/multiformats/go-varint) -[![Travis CI](https://img.shields.io/travis/multiformats/go-varint.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-varint) -[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-varint.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-varint?branch=master) - -> Varint helpers that enforce minimal encoding. 
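A minimal usage sketch of this module's public API (the `ToUvarint`, `FromUvarint`, `UvarintSize`, and `ErrNotMinimal` names all come from `varint.go` below; the sample program and the value 300 are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/multiformats/go-varint"
)

func main() {
	// 300 encodes to the two bytes 0xac 0x02.
	buf := varint.ToUvarint(300)
	fmt.Printf("% x (size %d)\n", buf, varint.UvarintSize(300))

	// Decoding returns the value and the number of bytes consumed.
	v, n, err := varint.FromUvarint(buf)
	fmt.Println(v, n, err) // 300 2 <nil>

	// Minimal encoding is enforced: 0x81 0x00 also denotes 1,
	// but only the single byte 0x01 is accepted.
	_, _, err = varint.FromUvarint([]byte{0x81, 0x00})
	fmt.Println(err == varint.ErrNotMinimal) // true
}
```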
- -## Table of Contents - -- [Install](#install) -- [Contribute](#contribute) -- [License](#license) - -## Install - -```sh -go get github.com/multiformats/go-varint -``` - -## Contribute - -Contributions welcome. Please check out [the issues](https://github.com/multiformats/go-multiaddr/issues). - -Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - -Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. - -## License - -[MIT](LICENSE) © 2019 Protocol Labs diff --git a/vendor/github.com/multiformats/go-varint/codecov.yml b/vendor/github.com/multiformats/go-varint/codecov.yml deleted file mode 100644 index ca8100a..0000000 --- a/vendor/github.com/multiformats/go-varint/codecov.yml +++ /dev/null @@ -1,2 +0,0 @@ -ignore: - - "multiaddr" diff --git a/vendor/github.com/multiformats/go-varint/varint.go b/vendor/github.com/multiformats/go-varint/varint.go deleted file mode 100644 index 47340d9..0000000 --- a/vendor/github.com/multiformats/go-varint/varint.go +++ /dev/null @@ -1,116 +0,0 @@ -package varint - -import ( - "encoding/binary" - "errors" - "io" - "math/bits" -) - -var ( - ErrOverflow = errors.New("varints larger than uint63 not supported") - ErrUnderflow = errors.New("varints malformed, could not reach the end") - ErrNotMinimal = errors.New("varint not minimally encoded") -) - -const ( - // MaxLenUvarint63 is the maximum number of bytes representing an uvarint in - // this encoding, supporting a maximum value of 2^63 (uint63), aka - // MaxValueUvarint63. - MaxLenUvarint63 = 9 - - // MaxValueUvarint63 is the maximum encodable uint63 value. - MaxValueUvarint63 = (1 << 63) - 1 -) - -// UvarintSize returns the size (in bytes) of `num` encoded as a unsigned varint. -// -// This may return a size greater than MaxUvarintLen63, which would be an -// illegal value, and would be rejected by readers. -func UvarintSize(num uint64) int { - bits := bits.Len64(num) - q, r := bits/7, bits%7 - size := q - if r > 0 || size == 0 { - size++ - } - return size -} - -// ToUvarint converts an unsigned integer to a varint-encoded []byte -func ToUvarint(num uint64) []byte { - buf := make([]byte, UvarintSize(num)) - n := binary.PutUvarint(buf, uint64(num)) - return buf[:n] -} - -// FromUvarint reads an unsigned varint from the beginning of buf, returns the -// varint, and the number of bytes read. -func FromUvarint(buf []byte) (uint64, int, error) { - // Modified from the go standard library. Copyright the Go Authors and - // released under the BSD License. - var x uint64 - var s uint - for i, b := range buf { - if (i == 8 && b >= 0x80) || i >= MaxLenUvarint63 { - // this is the 9th and last byte we're willing to read, but it - // signals there's more (1 in MSB). - // or this is the >= 10th byte, and for some reason we're still here. - return 0, 0, ErrOverflow - } - if b < 0x80 { - if b == 0 && s > 0 { - return 0, 0, ErrNotMinimal - } - return x | uint64(b)<= 0x80) || i >= MaxLenUvarint63 { - // this is the 9th and last byte we're willing to read, but it - // signals there's more (1 in MSB). - // or this is the >= 10th byte, and for some reason we're still here. 
- return 0, ErrOverflow - } - if b < 0x80 { - if b == 0 && s > 0 { - return 0, ErrNotMinimal - } - return x | uint64(b)< b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. 
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/polydawn/refmt/LICENSE b/vendor/github.com/polydawn/refmt/LICENSE deleted file mode 100644 index 9f30daa..0000000 --- a/vendor/github.com/polydawn/refmt/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Eric Myhre - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
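The sequence-matcher and diff-writer code removed above mirrors Python's difflib. A minimal sketch of how it is typically driven, assuming the vendored package path is `github.com/pmezard/go-difflib/difflib` (the file header for that package is garbled in this hunk); file names and contents are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	// SplitLines keeps the trailing "\n" on each line, which is what
	// UnifiedDiff expects, so no extra newlines are needed on output.
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\ntwo\nfour\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
	// Prints a unified diff with "--- a.txt", "+++ b.txt",
	// an "@@ -1,3 +1,3 @@" hunk header, "-three" and "+four".
}
```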
diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/atlas.go b/vendor/github.com/polydawn/refmt/obj/atlas/atlas.go deleted file mode 100644 index f96b4cb..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/atlas.go +++ /dev/null @@ -1,79 +0,0 @@ -package atlas - -import ( - "fmt" - "reflect" -) - -type Atlas struct { - // Map typeinfo to a static description of how that type should be handled. - // (The internal machinery that will wield this information, and has memory of - // progress as it does so, is configured using the AtlasEntry, but allocated separately. - // The machinery is stateful and mutable; the AtlasEntry is not.) - // - // We use 'var rtid uintptr = reflect.ValueOf(rt).Pointer()' -- pointer of the - // value of the reflect.Type info -- as an index. - // This is both unique and correctly converges when recomputed, and much - // faster to compare against than reflect.Type (which is an interface that - // tends to contain fairly large structures). - mappings map[uintptr]*AtlasEntry - - // Mapping of tag ints to atlasEntry for quick lookups when the - // unmarshaller hits a tag. Values are a subset of `mappings`. - tagMappings map[int]*AtlasEntry - - // MapMorphism specifies the default map sorting scheme - defaultMapMorphism *MapMorphism -} - -func Build(entries ...*AtlasEntry) (Atlas, error) { - atl := Atlas{ - mappings: make(map[uintptr]*AtlasEntry), - tagMappings: make(map[int]*AtlasEntry), - defaultMapMorphism: &MapMorphism{KeySortMode_Default}, - } - for _, entry := range entries { - rtid := reflect.ValueOf(entry.Type).Pointer() - if _, exists := atl.mappings[rtid]; exists { - return Atlas{}, fmt.Errorf("repeated entry for type %v", entry.Type) - } - atl.mappings[rtid] = entry - - if entry.Tagged == true { - if prev, exists := atl.tagMappings[entry.Tag]; exists { - return Atlas{}, fmt.Errorf("repeated tag %v on type %v (already mapped to type %v)", entry.Tag, entry.Type, prev.Type) - } - atl.tagMappings[entry.Tag] = entry - } - } - return atl, nil -} -func MustBuild(entries ...*AtlasEntry) Atlas { - atl, err := Build(entries...) - if err != nil { - panic(err) - } - return atl -} - -func (atl Atlas) WithMapMorphism(m MapMorphism) Atlas { - atl.defaultMapMorphism = &m - return atl -} - -// Gets the AtlasEntry for a typeID. Used by obj package, not meant for user facing. -func (atl Atlas) Get(rtid uintptr) (*AtlasEntry, bool) { - ent, ok := atl.mappings[rtid] - return ent, ok -} - -// Gets the AtlasEntry for a tag int. Used by obj package, not meant for user facing. -func (atl Atlas) GetEntryByTag(tag int) (*AtlasEntry, bool) { - ent, ok := atl.tagMappings[tag] - return ent, ok -} - -// Gets the default map morphism config. Used by obj package, not meant for user facing. -func (atl Atlas) GetDefaultMapMorphism() *MapMorphism { - return atl.defaultMapMorphism -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/atlasCommon.go b/vendor/github.com/polydawn/refmt/obj/atlas/atlasCommon.go deleted file mode 100644 index 611481b..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/atlasCommon.go +++ /dev/null @@ -1,10 +0,0 @@ -package atlas - -// A type to enumerate key sorting modes. -type KeySortMode string - -const ( - KeySortMode_Default = KeySortMode("default") // the default mode -- for structs, this is the source-order of the fields; for maps, it's identify to "strings" sort mode. - KeySortMode_Strings = KeySortMode("strings") // lexical sort by strings. this *is* the default for maps; it overrides source-order sorting for structs. 
- KeySortMode_RFC7049 = KeySortMode("rfc7049") // "Canonical" as proposed by rfc7049 § 3.9 (shorter byte sequences sort to top). -) diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/atlasEntry.go b/vendor/github.com/polydawn/refmt/obj/atlas/atlasEntry.go deleted file mode 100644 index 4c3ee45..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/atlasEntry.go +++ /dev/null @@ -1,150 +0,0 @@ -package atlas - -import ( - "reflect" -) - -/* - The AtlasEntry is a declarative roadmap of what we should do for - marshal and unmarshal of a single object, keyed by type. - - There are a lot of paths your mappings might want to take: - - - For a struct type, you may simply want to specify some alternate keys, or some to leave out, etc. - - For an interface type, you probably want to specify one of our interface muxing strategies - with a mapping between enumstr:typeinfo (and, what to do if we get a struct we don't recognize). - - For a string, int, or other primitive, you don't need to say anything: defaults will DTRT. - - For a typedef'd string, int, or other primitive, you *still* don't need to say anything: but, - if you want custom behavior (say, transform the string to an int at the last second, and back again), - you can specify transformer functions for that. - - For a struct type that you want to turn into a whole different kind (like a string): use - those same transform functions. (You'll no longer need a FieldMap.) - - For the most esoteric needs, you can fall all the way back to providing a custom MarshalMachine - (but avoid that; it's a lot of work, and one of these other transform methods should suffice). -*/ -type AtlasEntry struct { - // The reflect info of the type this morphism is regarding. - Type reflect.Type - - // -------------------------------------------------------- - // The big escape valves: wanna map to some other kind completely? - // -------------------------------------------------------- - - // Transforms the value we reached by walking (the 'live' value -- which - // must be of `this.Type`) into another value (the 'serialable' value -- - // which will be of `this.MarshalTransformTargetType`). - // - // The target type may be anything, even of a completely different Kind! - // - // This transform func runs first, then the resulting value is - // serialized (by running through the path through Atlas again, so - // chaining of transform funcs is supported, though not recommended). - MarshalTransformFunc MarshalTransformFunc - // The type of value we expect after using the MarshalTransformFunc. - // - // The match between transform func and target type should be checked - // during construction of this AtlasEntry. - MarshalTransformTargetType reflect.Type - - // Expects a different type (the 'serialable' value -- which will be of - // 'this.UnmarshalTransformTargetType') than the value we reached by - // walking (the 'live' value -- which must be of `this.Type`). - // - // The target type may be anything, even of a completely different Kind! - // - // The unmarshal of that target type will be run first, then the - // resulting value is fed through this function to produce the real value, - // which is then placed correctly into bigger mid-unmarshal object tree. - // - // For non-primitives, unmarshal of the target type will always target - // an empty pointer or empty slice, roughly as per if it was - // operating on a value produced by `TargetType.New()`. 
- UnmarshalTransformFunc UnmarshalTransformFunc - // The type of value we will manufacture an instance of and unmarshal - // into, then when done provide to the UnmarshalTransformFunc. - // - // The match between transform func and target type should be checked - // during construction of this AtlasEntry. - UnmarshalTransformTargetType reflect.Type - - // -------------------------------------------------------- - // Standard options for how to map (varies by Kind) - // -------------------------------------------------------- - - // A "tag" to emit when marshalling this type of value; - // and when unmarshalling, this tag will cause unmarshal to pick - // this atlas (and if there's conflicting type info, error). - Tag int - // Flag for whether the Tag feature should be used (zero is a valid tag). - Tagged bool - - // A mapping of fields in a struct to serial keys. - // Only valid if `this.Type.Kind() == Struct`. - StructMap *StructMap - - // Configuration for how to traverse a map kind. - // Only valid if `this.Type.Kind() == Map`. - MapMorphism *MapMorphism - - // Configuration for how to pick concrete types to fill a union interface. - // Only valid if `this.Type.Kind() == Interface`. - UnionKeyedMorphism *UnionKeyedMorphism - - // FUTURE: enum-ish primitives, multiplexers for interfaces, - // lots of such things will belong here. - - // -------------------------------------------------------- - // Hooks, validate helpers - // -------------------------------------------------------- - - // A validation function which will be called for the whole value - // after unmarshalling reached the end of the object. - // If it returns an error, the entire unmarshal will error. - // - // Not used in marshalling. - // Not reachable if an UnmarshalTransform is set. - ValidateFn func(v interface{}) error -} - -func BuildEntry(typeHintObj interface{}) *BuilderCore { - rt := reflect.TypeOf(typeHintObj) - if rt.Kind() == reflect.Ptr { - if rt.Elem().Kind() == reflect.Interface { - rt = rt.Elem() - } else { - panic("invalid atlas build: use the bare object, not a pointer (refmt will handle pointers automatically)") - } - } - return &BuilderCore{ - &AtlasEntry{Type: rt}, - } -} - -/* - Intermediate step in building an AtlasEntry: use `BuildEntry` to - get one of these to start with, then call one of the methods - on this type to get a specialized builder which has the methods - relevant for setting up that specific kind of mapping. - - One full example of using this builder may look like the following: - - atlas.BuildEntry(Formula{}).StructMap().Autogenerate().Complete() - - Some intermediate manipulations may be performed on this object, - for example setting the "tag" (if you want to use cbor tagging), - before calling the specializer method. - In this case, just keep chaining the configuration calls like so: - - atlas.BuildEntry(Formula{}).UseTag(4000) - .StructMap().Autogenerate().Complete() - -*/ -type BuilderCore struct { - entry *AtlasEntry -} - -func (x *BuilderCore) UseTag(tag int) *BuilderCore { - x.entry.Tagged = true - x.entry.Tag = tag - return x -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/doc.go b/vendor/github.com/polydawn/refmt/obj/atlas/doc.go deleted file mode 100644 index 52e6ddb..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Atlas types are used to define how to map Go values into refmt token streams. 
- - Atlas information may be autogenerated based on struct tags automatically, - but you can also specify custom AtlasEntry info to use advanced features - and define custom transformations. - - An Atlas is a collection of AtlasEntry (plus some internal indexing). - Typical usage is to declare an AtlasEntry for your structs (often near by the - struct definition), then - - Building an AtlasEntry for some type called `Formula` looks like this: - - atlas.BuildEntry(Formula{}).StructMap().Autogenerate().Complete() - - Building an AtlasEntry always starts with `atlas.BuildEntry(x)` where `x` is - a dummy object used to convey type information. - The next function in the chain declares what kind of behavior we're going - to use to turn that type of object into its serial form. - (In the above example, we're declaring that we want refmt to see the `Formula` - type as a struct and traverse its fields. There are many other options!) - Subsequent functions are specific to what kind of walking and mapping we've - chosen. For struct walking, this may involve declaring fields and custom serial - names to map them to; for a "Transform" we'd instead have to provide callbacks - to do the transformation from the `Formula` type to some other type; etcetera. - The final function in the chain is always called `Complete`, and returns - a ready-to-use AtlasEntry. - - Building a complete Atlas for a whole suite of serializable types is as - easy as putting a bunch of them together: - - atlas.Build( - atlas.BuildEntry(Foo{}).StructMap().Autogenerate().Complete(), - atlas.BuildEntry(Bar{}).StructMap().Autogenerate().Complete(), - atlas.BuildEntry(Baz{}).StructMap().Autogenerate().Complete(), - ) - - You can put your entire protocol into one Atlas. - It's also possible to build several different Atlases each with different - sets of AtlasEntry. This may be useful if you have a protocol where some - messages are not valid during some phases of communication, and you would - like to use the Atlas as a form of whitelisting for what can be - marshalled/unmarshalled. -*/ -package atlas diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/errors.go b/vendor/github.com/polydawn/refmt/obj/atlas/errors.go deleted file mode 100644 index 48fe37f..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package atlas - -// Error type raised when initializing an Atlas, and field entries do -// not resolve against the type. -// (If you recently refactored names of fields in your types, check -// to make sure you updated any references to those fields by name to match!) 
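To make the transform path described above concrete, here is a minimal sketch (not part of the vendored source): `Temperature` is a hypothetical application type, and the only package APIs used are BuildEntry, Transform, TransformMarshal/TransformUnmarshal, and the MakeMarshalTransformFunc/MakeUnmarshalTransformFunc helpers defined elsewhere in this package.

    package example

    import (
        "fmt"

        "github.com/polydawn/refmt/obj/atlas"
    )

    // Temperature is a hypothetical app type, serialized as a string like "21.5C".
    type Temperature struct{ Celsius float64 }

    func TemperatureEntry() *atlas.AtlasEntry {
        // Live -> serial: Temperature becomes a plain string.
        marshalFn, serialType := atlas.MakeMarshalTransformFunc(
            func(t Temperature) (string, error) {
                return fmt.Sprintf("%gC", t.Celsius), nil
            })
        // Serial -> live: the string is parsed back into a Temperature.
        unmarshalFn, serialSrcType := atlas.MakeUnmarshalTransformFunc(
            func(s string) (Temperature, error) {
                var c float64
                _, err := fmt.Sscanf(s, "%gC", &c)
                return Temperature{Celsius: c}, err
            })
        return atlas.BuildEntry(Temperature{}).Transform().
            TransformMarshal(marshalFn, serialType).
            TransformUnmarshal(unmarshalFn, serialSrcType).
            Complete()
    }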
-type ErrStructureMismatch struct { - TypeName string - Reason string -} - -func (e ErrStructureMismatch) Error() string { - return "structure mismatch: " + e.TypeName + " " + e.Reason -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/mapMorphism.go b/vendor/github.com/polydawn/refmt/obj/atlas/mapMorphism.go deleted file mode 100644 index b5602cd..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/mapMorphism.go +++ /dev/null @@ -1,38 +0,0 @@ -package atlas - -import ( - "fmt" - "reflect" -) - -type MapMorphism struct { - KeySortMode KeySortMode -} - -func (x *BuilderCore) MapMorphism() *BuilderMapMorphism { - if x.entry.Type.Kind() != reflect.Map { - panic(fmt.Errorf("cannot use mapMorphism for type %q, which is kind %s", x.entry.Type, x.entry.Type.Kind())) - } - x.entry.MapMorphism = &MapMorphism{ - KeySortMode_Default, - } - return &BuilderMapMorphism{x.entry} -} - -type BuilderMapMorphism struct { - entry *AtlasEntry -} - -func (x *BuilderMapMorphism) Complete() *AtlasEntry { - return x.entry -} - -func (x *BuilderMapMorphism) SetKeySortMode(km KeySortMode) *BuilderMapMorphism { - switch km { - case KeySortMode_Default, KeySortMode_Strings, KeySortMode_RFC7049: - x.entry.MapMorphism.KeySortMode = km - default: - panic(fmt.Errorf("invalid key sort mode %q", km)) - } - return x -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/structMap.go b/vendor/github.com/polydawn/refmt/obj/atlas/structMap.go deleted file mode 100644 index 0ede93c..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/structMap.go +++ /dev/null @@ -1,46 +0,0 @@ -package atlas - -import "reflect" - -type StructMap struct { - // A slice of descriptions of each field in the type. - // Each entry specifies the name by which each field should be referenced - // when serialized, and defines a way to get an address to the field. - Fields []StructMapEntry -} - -type StructMapEntry struct { - // The field name; will be emitted as token during marshal, and used for - // lookup during unmarshal. Required. - SerialName string - - // If true, a key token with this SerialName will be ignored during unmarshal. - // (By default, if there's no StructMapEntry for a key token, it's an error.) - // If true, the ReflectRoute, Type, etc fields are irrelevant and may be nil. - Ignore bool - - ReflectRoute ReflectRoute // reflection generates these. - Type reflect.Type // type to expect on the far side of the ReflectRoute. - tagged bool // used during autogen. - - // Theoretical feature which would be alternative to ReflectRoute. Support dropped for the moment. - //addrFunc func(interface{}) interface{} // custom user function. - - // If true, marshalling will skip this field if it's the zero value. 
- OmitEmpty bool -} - -type ReflectRoute []int - -func (rr ReflectRoute) TraverseToValue(v reflect.Value) reflect.Value { - for _, i := range rr { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - } - v = v.Field(i) - } - return v -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/structMapAutogen.go b/vendor/github.com/polydawn/refmt/obj/atlas/structMapAutogen.go deleted file mode 100644 index 32fb57c..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/structMapAutogen.go +++ /dev/null @@ -1,320 +0,0 @@ -package atlas - -import ( - "fmt" - "reflect" - "sort" - "strings" - "unicode" -) - -func AutogenerateStructMapEntry(rt reflect.Type) *AtlasEntry { - return AutogenerateStructMapEntryUsingTags(rt, "refmt", KeySortMode_Default) -} - -func AutogenerateStructMapEntryUsingTags(rt reflect.Type, tagName string, sorter KeySortMode) *AtlasEntry { - if rt.Kind() != reflect.Struct { - panic(fmt.Errorf("cannot use structMap for type %q, which is kind %s", rt, rt.Kind())) - } - entry := &AtlasEntry{ - Type: rt, - StructMap: &StructMap{Fields: exploreFields(rt, tagName, sorter)}, - } - return entry -} - -// exploreFields returns a list of fields that StructAtlas should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func exploreFields(rt reflect.Type, tagName string, sorter KeySortMode) []StructMapEntry { - // Anonymous fields to explore at the current level and the next. - current := []StructMapEntry{} - next := []StructMapEntry{{Type: rt}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []StructMapEntry - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.Type] { - continue - } - visited[f.Type] = true - - // Scan f.Type for fields to include. - for i := 0; i < f.Type.NumField(); i++ { - sf := f.Type.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - tag := sf.Tag.Get(tagName) - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - route := make([]int, len(f.ReflectRoute)+1) - copy(route, f.ReflectRoute) - route[len(f.ReflectRoute)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = downcaseFirstLetter(sf.Name) - } - fields = append(fields, StructMapEntry{ - SerialName: name, - ReflectRoute: route, - Type: sf.Type, - tagged: tagged, - OmitEmpty: opts.Contains("omitempty"), - }) - if count[f.Type] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. 
- nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, StructMapEntry{ - ReflectRoute: route, - Type: ft, - }) - } - } - } - } - - sort.Sort(StructMapEntry_byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.SerialName - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.SerialName != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - switch sorter { - case KeySortMode_Default: - sort.Sort(StructMapEntry_byFieldRoute(fields)) - case KeySortMode_Strings: - //sort.Sort(StructMapEntry_byName(fields)) - // it's already in this order, though, so, pass - case KeySortMode_RFC7049: - sort.Sort(StructMapEntry_RFC7049(fields)) - default: - panic("invalid struct sorter option") - } - - return fields -} - -// If the first character of the string is uppercase, return a string -// where it is switched to lowercase. -// We use this to make go field names look more like what everyone else -// in the universe expects their json to look like by default: snakeCase. -func downcaseFirstLetter(s string) string { - if s == "" { - return "" - } - r := rune(s[0]) // if multibyte chars: you're left alone. - if !unicode.IsUpper(r) { - return s - } - return string(unicode.ToLower(r)) + s[1:] -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []StructMapEntry) (StructMapEntry, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].ReflectRoute) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.ReflectRoute) > length { - fields = fields[:i] - break - } - if f.tagged { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return StructMapEntry{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return StructMapEntry{}, false - } - return fields[0], true -} - -// StructMapEntry_byName sorts field by name, -// breaking ties with depth, -// then breaking ties with "name came from tag", -// then breaking ties with FieldRoute sequence. 
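The tag handling implemented above is easier to see with a small example. This is an illustrative sketch only: `Profile` is a hypothetical struct, and the snippet relies solely on AutogenerateStructMapEntry and the `refmt` tag key used by this package.

    package example

    import (
        "fmt"
        "reflect"

        "github.com/polydawn/refmt/obj/atlas"
    )

    // Profile is a hypothetical type showing the common tag forms.
    type Profile struct {
        ID          string `refmt:"id"`         // explicit serial name
        DisplayName string                      // no tag: defaults to "displayName"
        Notes       string `refmt:",omitempty"` // default name, OmitEmpty set
        Internal    string `refmt:"-"`          // excluded from the StructMap
    }

    func main() {
        entry := atlas.AutogenerateStructMapEntry(reflect.TypeOf(Profile{}))
        for _, f := range entry.StructMap.Fields {
            fmt.Printf("%s omitempty=%v\n", f.SerialName, f.OmitEmpty)
        }
        // With the default (field-order) sort this should print:
        //   id omitempty=false
        //   displayName omitempty=false
        //   notes omitempty=true
    }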
-type StructMapEntry_byName []StructMapEntry - -func (x StructMapEntry_byName) Len() int { return len(x) } -func (x StructMapEntry_byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x StructMapEntry_byName) Less(i, j int) bool { - if x[i].SerialName != x[j].SerialName { - return x[i].SerialName < x[j].SerialName - } - if len(x[i].ReflectRoute) != len(x[j].ReflectRoute) { - return len(x[i].ReflectRoute) < len(x[j].ReflectRoute) - } - if x[i].tagged != x[j].tagged { - return x[i].tagged - } - return StructMapEntry_byFieldRoute(x).Less(i, j) -} - -// StructMapEntry_RFC7049 sorts fields as specified in RFC7049, -type StructMapEntry_RFC7049 []StructMapEntry - -func (x StructMapEntry_RFC7049) Len() int { return len(x) } -func (x StructMapEntry_RFC7049) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x StructMapEntry_RFC7049) Less(i, j int) bool { - il, jl := len(x[i].SerialName), len(x[j].SerialName) - switch { - case il < jl: - return true - case il > jl: - return false - default: - return x[i].SerialName < x[j].SerialName - } -} - -// StructMapEntry_byFieldRoute sorts field by FieldRoute sequence -// (e.g., roughly source declaration order within each type). -type StructMapEntry_byFieldRoute []StructMapEntry - -func (x StructMapEntry_byFieldRoute) Len() int { return len(x) } -func (x StructMapEntry_byFieldRoute) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x StructMapEntry_byFieldRoute) Less(i, j int) bool { - for k, xik := range x[i].ReflectRoute { - if k >= len(x[j].ReflectRoute) { - return false - } - if xik != x[j].ReflectRoute[k] { - return xik < x[j].ReflectRoute[k] - } - } - return len(x[i].ReflectRoute) < len(x[j].ReflectRoute) -} - -// tagOptions is the string following a comma in a struct field's -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. 
- default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/structMapBuilding.go b/vendor/github.com/polydawn/refmt/obj/atlas/structMapBuilding.go deleted file mode 100644 index 1c9fd32..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/structMapBuilding.go +++ /dev/null @@ -1,96 +0,0 @@ -package atlas - -import ( - "fmt" - "reflect" - "strings" -) - -func (x *BuilderCore) StructMap() *BuilderStructMap { - if x.entry.Type.Kind() != reflect.Struct { - panic(fmt.Errorf("cannot use structMap for type %q, which is kind %s", x.entry.Type, x.entry.Type.Kind())) - } - x.entry.StructMap = &StructMap{} - return &BuilderStructMap{x.entry} -} - -type BuilderStructMap struct { - entry *AtlasEntry -} - -func (x *BuilderStructMap) Complete() *AtlasEntry { - return x.entry -} - -/* - Add a field to the mapping based on its name. - - Given a struct: - - struct{ - X int - Y struct{ Z int } - } - - `AddField("X", {"x", ...}) will cause that field to be serialized as key "x"; - `AddField("Y.Z", {"z", ...})` will cause that *nested* field to be serialized - as key "z" in the same object (e.g. "x" and "z" will be siblings). - - Returns the mutated builder for convenient call chaining. - - If the fieldName string doesn't map onto the structure type info, - a panic will be raised. -*/ -func (x *BuilderStructMap) AddField(fieldName string, mapping StructMapEntry) *BuilderStructMap { - fieldNameSplit := strings.Split(fieldName, ".") - rr, rt, err := fieldNameToReflectRoute(x.entry.Type, fieldNameSplit) - if err != nil { - panic(err) // REVIEW: now that we have the builder obj, we could just curry these into it until 'Complete' is called (or, thus, 'MustComplete'!). - } - mapping.ReflectRoute = rr - mapping.Type = rt - x.entry.StructMap.Fields = append(x.entry.StructMap.Fields, mapping) - return x -} - -func (x *BuilderStructMap) IgnoreKey(serialKeyName string) *BuilderStructMap { - x.entry.StructMap.Fields = append(x.entry.StructMap.Fields, StructMapEntry{ - SerialName: serialKeyName, - Ignore: true, - }) - return x -} - -func fieldNameToReflectRoute(rt reflect.Type, fieldNameSplit []string) (rr ReflectRoute, _ reflect.Type, _ error) { - for _, fn := range fieldNameSplit { - rf, ok := rt.FieldByName(fn) - if !ok { - return nil, nil, ErrStructureMismatch{rt.Name(), "does not have field named " + fn} - } - rr = append(rr, rf.Index...) - rt = rf.Type - } - return rr, rt, nil -} - -/* - Automatically generate mappings by looking at the struct type info, - taking any hints from tags, and appending that to the builder. - - You may use autogeneration in concert with manually adding field mappings, - though if doing so be mindful not to map the same fields twice. -*/ -func (x *BuilderStructMap) Autogenerate() *BuilderStructMap { - autoEntry := AutogenerateStructMapEntry(x.entry.Type) - x.entry.StructMap.Fields = append(x.entry.StructMap.Fields, autoEntry.StructMap.Fields...) - return x -} - -/* - Automatically generate mappings using a given struct field sorting scheme -*/ -func (x *BuilderStructMap) AutogenerateWithSortingScheme(sorting KeySortMode) *BuilderStructMap { - autoEntry := AutogenerateStructMapEntryUsingTags(x.entry.Type, "refmt", sorting) - x.entry.StructMap.Fields = append(x.entry.StructMap.Fields, autoEntry.StructMap.Fields...) 
- return x -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/transformBuilding.go b/vendor/github.com/polydawn/refmt/obj/atlas/transformBuilding.go deleted file mode 100644 index 6892fdc..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/transformBuilding.go +++ /dev/null @@ -1,30 +0,0 @@ -package atlas - -import ( - "reflect" -) - -func (x *BuilderCore) Transform() *BuilderTransform { - // no checks on x.entry.Type.Kind() here -- transforms can be pretty much any<->any - return &BuilderTransform{x.entry} -} - -type BuilderTransform struct { - entry *AtlasEntry -} - -func (x *BuilderTransform) Complete() *AtlasEntry { - return x.entry -} - -func (x *BuilderTransform) TransformMarshal(trFunc MarshalTransformFunc, toType reflect.Type) *BuilderTransform { - x.entry.MarshalTransformFunc = trFunc - x.entry.MarshalTransformTargetType = toType - return x -} - -func (x *BuilderTransform) TransformUnmarshal(trFunc UnmarshalTransformFunc, toType reflect.Type) *BuilderTransform { - x.entry.UnmarshalTransformFunc = trFunc - x.entry.UnmarshalTransformTargetType = toType - return x -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/transformFuncs.go b/vendor/github.com/polydawn/refmt/obj/atlas/transformFuncs.go deleted file mode 100644 index 5cfe98f..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/transformFuncs.go +++ /dev/null @@ -1,68 +0,0 @@ -package atlas - -import "reflect" - -type MarshalTransformFunc func(liveForm reflect.Value) (serialForm reflect.Value, err error) -type UnmarshalTransformFunc func(serialForm reflect.Value) (liveForm reflect.Value, err error) - -var err_rt = reflect.TypeOf((*error)(nil)).Elem() - -/* - Takes a wildcard object which must be `func (live T1) (serialable T2, error)` - and returns a MarshalTransformFunc and the typeinfo of T2. -*/ -func MakeMarshalTransformFunc(fn interface{}) (MarshalTransformFunc, reflect.Type) { - fn_rv := reflect.ValueOf(fn) - if fn_rv.Kind() != reflect.Func { - panic("no") - } - fn_rt := fn_rv.Type() - if fn_rt.NumIn() != 1 { - panic("no") - } - if fn_rt.NumOut() != 2 { - panic("no") - } - if !fn_rt.Out(1).AssignableTo(err_rt) { - panic("no") - } - // nothing to do for `fn_rt.In(0)` -- whatever type it is... TODO redesign to make less sketchy; we should most certainly be able to check this in the builder - out_rt := fn_rt.Out(0) - return func(liveForm reflect.Value) (serialForm reflect.Value, err error) { - results := fn_rv.Call([]reflect.Value{liveForm}) - if results[1].IsNil() { - return results[0], nil - } - return results[0], results[1].Interface().(error) - }, out_rt -} - -/* - Takes a wildcard object which must be `func (serialable T1) (live T2, error)` - and returns a UnmarshalTransformFunc and the typeinfo of T1. -*/ -func MakeUnmarshalTransformFunc(fn interface{}) (UnmarshalTransformFunc, reflect.Type) { - fn_rv := reflect.ValueOf(fn) - if fn_rv.Kind() != reflect.Func { - panic("no") - } - fn_rt := fn_rv.Type() - if fn_rt.NumIn() != 1 { - panic("no") - } - if fn_rt.NumOut() != 2 { - panic("no") - } - if !fn_rt.Out(1).AssignableTo(err_rt) { - panic("no") - } - // nothing to do for checking `fn_rf.Out(0)` -- because we don't know what entry we're about to be used for. TODO redesign to make less sketchy. 
- in_rt := fn_rt.In(0) - return func(serialForm reflect.Value) (liveForm reflect.Value, err error) { - results := fn_rv.Call([]reflect.Value{serialForm}) - if results[1].IsNil() { - return results[0], nil - } - return results[0], results[1].Interface().(error) - }, in_rt -} diff --git a/vendor/github.com/polydawn/refmt/obj/atlas/unionMorphism.go b/vendor/github.com/polydawn/refmt/obj/atlas/unionMorphism.go deleted file mode 100644 index 8083185..0000000 --- a/vendor/github.com/polydawn/refmt/obj/atlas/unionMorphism.go +++ /dev/null @@ -1,45 +0,0 @@ -package atlas - -import ( - "fmt" - "reflect" - "sort" -) - -type UnionKeyedMorphism struct { - // Mapping of typehint key strings to atlasEntry that should be delegated to. - Elements map[string]*AtlasEntry - // Mapping of rtid to string (roughly the dual of the Elements map). - Mappings map[uintptr]string - // Purely to have in readiness for error messaging. - KnownMembers []string -} - -func (x *BuilderCore) KeyedUnion() *BuilderUnionKeyedMorphism { - if x.entry.Type.Kind() != reflect.Interface { - panic(fmt.Errorf("cannot use union morphisms for type %q, which is kind %s", x.entry.Type, x.entry.Type.Kind())) - } - x.entry.UnionKeyedMorphism = &UnionKeyedMorphism{ - Elements: make(map[string]*AtlasEntry), - Mappings: make(map[uintptr]string), - } - return &BuilderUnionKeyedMorphism{x.entry} -} - -type BuilderUnionKeyedMorphism struct { - entry *AtlasEntry -} - -func (x *BuilderUnionKeyedMorphism) Of(elements map[string]*AtlasEntry) *AtlasEntry { - cfg := x.entry.UnionKeyedMorphism - for hint, ent := range elements { - // FIXME: check that all the delegates are... well struct or map machines really, but definitely blacklisting other delegating machinery. - // FIXME: and sanity check that they can all be assigned to the interface ffs. - - cfg.Elements[hint] = ent - cfg.Mappings[reflect.ValueOf(ent.Type).Pointer()] = hint - cfg.KnownMembers = append(cfg.KnownMembers, hint) - } - sort.Strings(cfg.KnownMembers) - return x.entry -} diff --git a/vendor/github.com/spaolacci/murmur3/.gitignore b/vendor/github.com/spaolacci/murmur3/.gitignore deleted file mode 100644 index 0026861..0000000 --- a/vendor/github.com/spaolacci/murmur3/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/spaolacci/murmur3/.travis.yml b/vendor/github.com/spaolacci/murmur3/.travis.yml deleted file mode 100644 index 9bfca9c..0000000 --- a/vendor/github.com/spaolacci/murmur3/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.x - - master - -script: go test diff --git a/vendor/github.com/spaolacci/murmur3/LICENSE b/vendor/github.com/spaolacci/murmur3/LICENSE deleted file mode 100644 index 2a46fd7..0000000 --- a/vendor/github.com/spaolacci/murmur3/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2013, Sébastien Paolacci. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the library nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spaolacci/murmur3/README.md b/vendor/github.com/spaolacci/murmur3/README.md deleted file mode 100644 index e463678..0000000 --- a/vendor/github.com/spaolacci/murmur3/README.md +++ /dev/null @@ -1,86 +0,0 @@ -murmur3 -======= - -[![Build Status](https://travis-ci.org/spaolacci/murmur3.svg?branch=master)](https://travis-ci.org/spaolacci/murmur3) - -Native Go implementation of Austin Appleby's third MurmurHash revision (aka -MurmurHash3). - -Reference algorithm has been slightly hacked as to support the streaming mode -required by Go's standard [Hash interface](http://golang.org/pkg/hash/#Hash). - - -Benchmarks ----------- - -Go tip as of 2014-06-12 (i.e almost go1.3), core i7 @ 3.4 Ghz. All runs -include hasher instantiation and sequence finalization. - -
-
-
-
-Benchmark32_2        200000000     8.83 ns/op      226.42 MB/s
-Benchmark32_4        500000000     7.99 ns/op      500.39 MB/s
-Benchmark32_8        200000000     9.47 ns/op      844.69 MB/s
-Benchmark32_16       100000000     12.1 ns/op     1321.61 MB/s
-Benchmark32_32       100000000     18.3 ns/op     1743.93 MB/s
-Benchmark32_64        50000000     30.9 ns/op     2071.64 MB/s
-Benchmark32_128       50000000     57.6 ns/op     2222.96 MB/s
-Benchmark32_256       20000000      116 ns/op     2188.60 MB/s
-Benchmark32_512       10000000      226 ns/op     2260.59 MB/s
-Benchmark32_1024       5000000      452 ns/op     2263.73 MB/s
-Benchmark32_2048       2000000      891 ns/op     2296.02 MB/s
-Benchmark32_4096       1000000     1787 ns/op     2290.92 MB/s
-Benchmark32_8192        500000     3593 ns/op     2279.68 MB/s
-Benchmark128_1       100000000     26.1 ns/op       38.33 MB/s
-Benchmark128_2       100000000     29.0 ns/op       69.07 MB/s
-Benchmark128_4        50000000     29.8 ns/op      134.17 MB/s
-Benchmark128_8        50000000     31.6 ns/op      252.86 MB/s
-Benchmark128_16      100000000     26.5 ns/op      603.42 MB/s
-Benchmark128_32      100000000     28.6 ns/op     1117.15 MB/s
-Benchmark128_64       50000000     35.5 ns/op     1800.97 MB/s
-Benchmark128_128      50000000     50.9 ns/op     2515.50 MB/s
-Benchmark128_256      20000000     76.9 ns/op     3330.11 MB/s
-Benchmark128_512      20000000      135 ns/op     3769.09 MB/s
-Benchmark128_1024     10000000      250 ns/op     4094.38 MB/s
-Benchmark128_2048      5000000      477 ns/op     4290.75 MB/s
-Benchmark128_4096      2000000      940 ns/op     4353.29 MB/s
-Benchmark128_8192      1000000     1838 ns/op     4455.47 MB/s
-
-
-
-
-
-Benchmark32_1               98.90        118.59    1.20x        114.79    0.97x        130.00    1.13x
-Benchmark32_2              168.04        213.31    1.27x        210.65    0.99x        226.42    1.07x
-Benchmark32_4              414.01        494.19    1.19x        490.29    0.99x        500.39    1.02x
-Benchmark32_8              662.19        836.09    1.26x        836.46    1.00x        844.69    1.01x
-Benchmark32_16             917.46       1304.62    1.42x       1297.63    0.99x       1321.61    1.02x
-Benchmark32_32            1141.93       1737.54    1.52x       1728.24    0.99x       1743.93    1.01x
-Benchmark32_64            1289.47       2039.51    1.58x       2038.20    1.00x       2071.64    1.02x
-Benchmark32_128           1299.23       2097.63    1.61x       2177.13    1.04x       2222.96    1.02x
-Benchmark32_256           1369.90       2202.34    1.61x       2213.15    1.00x       2188.60    0.99x
-Benchmark32_512           1399.56       2255.72    1.61x       2264.49    1.00x       2260.59    1.00x
-Benchmark32_1024          1410.90       2285.82    1.62x       2270.99    0.99x       2263.73    1.00x
-Benchmark32_2048          1422.14       2297.62    1.62x       2269.59    0.99x       2296.02    1.01x
-Benchmark32_4096          1420.53       2307.81    1.62x       2273.43    0.99x       2290.92    1.01x
-Benchmark32_8192          1424.79       2312.87    1.62x       2286.07    0.99x       2279.68    1.00x
-Benchmark128_1               8.32         30.15    3.62x         30.84    1.02x         38.33    1.24x
-Benchmark128_2              16.38         59.72    3.65x         59.37    0.99x         69.07    1.16x
-Benchmark128_4              32.26        112.96    3.50x        114.24    1.01x        134.17    1.17x
-Benchmark128_8              62.68        217.88    3.48x        218.18    1.00x        252.86    1.16x
-Benchmark128_16            128.47        451.57    3.51x        474.65    1.05x        603.42    1.27x
-Benchmark128_32            246.18        910.42    3.70x        871.06    0.96x       1117.15    1.28x
-Benchmark128_64            449.05       1477.64    3.29x       1449.24    0.98x       1800.97    1.24x
-Benchmark128_128           762.61       2222.42    2.91x       2217.30    1.00x       2515.50    1.13x
-Benchmark128_256          1179.92       3005.46    2.55x       2931.55    0.98x       3330.11    1.14x
-Benchmark128_512          1616.51       3590.75    2.22x       3592.08    1.00x       3769.09    1.05x
-Benchmark128_1024         1964.36       3979.67    2.03x       4034.01    1.01x       4094.38    1.01x
-Benchmark128_2048         2225.07       4156.93    1.87x       4244.17    1.02x       4290.75    1.01x
-Benchmark128_4096         2360.15       4299.09    1.82x       4392.35    1.02x       4353.29    0.99x
-Benchmark128_8192         2411.50       4356.84    1.81x       4480.68    1.03x       4455.47    0.99x
-
-
- diff --git a/vendor/github.com/spaolacci/murmur3/murmur.go b/vendor/github.com/spaolacci/murmur3/murmur.go deleted file mode 100644 index 1252cf7..0000000 --- a/vendor/github.com/spaolacci/murmur3/murmur.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013, Sébastien Paolacci. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package murmur3 implements Austin Appleby's non-cryptographic MurmurHash3. - - Reference implementation: - http://code.google.com/p/smhasher/wiki/MurmurHash3 - - History, characteristics and (legacy) perfs: - https://sites.google.com/site/murmurhash/ - https://sites.google.com/site/murmurhash/statistics -*/ -package murmur3 - -type bmixer interface { - bmix(p []byte) (tail []byte) - Size() (n int) - reset() -} - -type digest struct { - clen int // Digested input cumulative length. - tail []byte // 0 to Size()-1 bytes view of `buf'. - buf [16]byte // Expected (but not required) to be Size() large. - seed uint32 // Seed for initializing the hash. - bmixer -} - -func (d *digest) BlockSize() int { return 1 } - -func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - d.clen += n - - if len(d.tail) > 0 { - // Stick back pending bytes. - nfree := d.Size() - len(d.tail) // nfree ∈ [1, d.Size()-1]. - if nfree < len(p) { - // One full block can be formed. - block := append(d.tail, p[:nfree]...) - p = p[nfree:] - _ = d.bmix(block) // No tail. - } else { - // Tail's buf is large enough to prevent reallocs. - p = append(d.tail, p...) - } - } - - d.tail = d.bmix(p) - - // Keep own copy of the 0 to Size()-1 pending bytes. - nn := copy(d.buf[:], d.tail) - d.tail = d.buf[:nn] - - return n, nil -} - -func (d *digest) Reset() { - d.clen = 0 - d.tail = nil - d.bmixer.reset() -} diff --git a/vendor/github.com/spaolacci/murmur3/murmur128.go b/vendor/github.com/spaolacci/murmur3/murmur128.go deleted file mode 100644 index a4b618b..0000000 --- a/vendor/github.com/spaolacci/murmur3/murmur128.go +++ /dev/null @@ -1,203 +0,0 @@ -package murmur3 - -import ( - //"encoding/binary" - "hash" - "unsafe" -) - -const ( - c1_128 = 0x87c37b91114253d5 - c2_128 = 0x4cf5ad432745937f -) - -// Make sure interfaces are correctly implemented. -var ( - _ hash.Hash = new(digest128) - _ Hash128 = new(digest128) - _ bmixer = new(digest128) -) - -// Hash128 represents a 128-bit hasher -// Hack: the standard api doesn't define any Hash128 interface. -type Hash128 interface { - hash.Hash - Sum128() (uint64, uint64) -} - -// digest128 represents a partial evaluation of a 128 bites hash. -type digest128 struct { - digest - h1 uint64 // Unfinalized running hash part 1. - h2 uint64 // Unfinalized running hash part 2. 
-} - -// New128 returns a 128-bit hasher -func New128() Hash128 { return New128WithSeed(0) } - -// New128WithSeed returns a 128-bit hasher set with explicit seed value -func New128WithSeed(seed uint32) Hash128 { - d := new(digest128) - d.seed = seed - d.bmixer = d - d.Reset() - return d -} - -func (d *digest128) Size() int { return 16 } - -func (d *digest128) reset() { d.h1, d.h2 = uint64(d.seed), uint64(d.seed) } - -func (d *digest128) Sum(b []byte) []byte { - h1, h2 := d.Sum128() - return append(b, - byte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32), - byte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1), - - byte(h2>>56), byte(h2>>48), byte(h2>>40), byte(h2>>32), - byte(h2>>24), byte(h2>>16), byte(h2>>8), byte(h2), - ) -} - -func (d *digest128) bmix(p []byte) (tail []byte) { - h1, h2 := d.h1, d.h2 - - nblocks := len(p) / 16 - for i := 0; i < nblocks; i++ { - t := (*[2]uint64)(unsafe.Pointer(&p[i*16])) - k1, k2 := t[0], t[1] - - k1 *= c1_128 - k1 = (k1 << 31) | (k1 >> 33) // rotl64(k1, 31) - k1 *= c2_128 - h1 ^= k1 - - h1 = (h1 << 27) | (h1 >> 37) // rotl64(h1, 27) - h1 += h2 - h1 = h1*5 + 0x52dce729 - - k2 *= c2_128 - k2 = (k2 << 33) | (k2 >> 31) // rotl64(k2, 33) - k2 *= c1_128 - h2 ^= k2 - - h2 = (h2 << 31) | (h2 >> 33) // rotl64(h2, 31) - h2 += h1 - h2 = h2*5 + 0x38495ab5 - } - d.h1, d.h2 = h1, h2 - return p[nblocks*d.Size():] -} - -func (d *digest128) Sum128() (h1, h2 uint64) { - - h1, h2 = d.h1, d.h2 - - var k1, k2 uint64 - switch len(d.tail) & 15 { - case 15: - k2 ^= uint64(d.tail[14]) << 48 - fallthrough - case 14: - k2 ^= uint64(d.tail[13]) << 40 - fallthrough - case 13: - k2 ^= uint64(d.tail[12]) << 32 - fallthrough - case 12: - k2 ^= uint64(d.tail[11]) << 24 - fallthrough - case 11: - k2 ^= uint64(d.tail[10]) << 16 - fallthrough - case 10: - k2 ^= uint64(d.tail[9]) << 8 - fallthrough - case 9: - k2 ^= uint64(d.tail[8]) << 0 - - k2 *= c2_128 - k2 = (k2 << 33) | (k2 >> 31) // rotl64(k2, 33) - k2 *= c1_128 - h2 ^= k2 - - fallthrough - - case 8: - k1 ^= uint64(d.tail[7]) << 56 - fallthrough - case 7: - k1 ^= uint64(d.tail[6]) << 48 - fallthrough - case 6: - k1 ^= uint64(d.tail[5]) << 40 - fallthrough - case 5: - k1 ^= uint64(d.tail[4]) << 32 - fallthrough - case 4: - k1 ^= uint64(d.tail[3]) << 24 - fallthrough - case 3: - k1 ^= uint64(d.tail[2]) << 16 - fallthrough - case 2: - k1 ^= uint64(d.tail[1]) << 8 - fallthrough - case 1: - k1 ^= uint64(d.tail[0]) << 0 - k1 *= c1_128 - k1 = (k1 << 31) | (k1 >> 33) // rotl64(k1, 31) - k1 *= c2_128 - h1 ^= k1 - } - - h1 ^= uint64(d.clen) - h2 ^= uint64(d.clen) - - h1 += h2 - h2 += h1 - - h1 = fmix64(h1) - h2 = fmix64(h2) - - h1 += h2 - h2 += h1 - - return h1, h2 -} - -func fmix64(k uint64) uint64 { - k ^= k >> 33 - k *= 0xff51afd7ed558ccd - k ^= k >> 33 - k *= 0xc4ceb9fe1a85ec53 - k ^= k >> 33 - return k -} - -/* -func rotl64(x uint64, r byte) uint64 { - return (x << r) | (x >> (64 - r)) -} -*/ - -// Sum128 returns the MurmurHash3 sum of data. It is equivalent to the -// following sequence (without the extra burden and the extra allocation): -// hasher := New128() -// hasher.Write(data) -// return hasher.Sum128() -func Sum128(data []byte) (h1 uint64, h2 uint64) { return Sum128WithSeed(data, 0) } - -// Sum128WithSeed returns the MurmurHash3 sum of data. 
It is equivalent to the -// following sequence (without the extra burden and the extra allocation): -// hasher := New128WithSeed(seed) -// hasher.Write(data) -// return hasher.Sum128() -func Sum128WithSeed(data []byte, seed uint32) (h1 uint64, h2 uint64) { - d := &digest128{h1: uint64(seed), h2: uint64(seed)} - d.seed = seed - d.tail = d.bmix(data) - d.clen = len(data) - return d.Sum128() -} diff --git a/vendor/github.com/spaolacci/murmur3/murmur32.go b/vendor/github.com/spaolacci/murmur3/murmur32.go deleted file mode 100644 index e32c995..0000000 --- a/vendor/github.com/spaolacci/murmur3/murmur32.go +++ /dev/null @@ -1,167 +0,0 @@ -package murmur3 - -// http://code.google.com/p/guava-libraries/source/browse/guava/src/com/google/common/hash/Murmur3_32HashFunction.java - -import ( - "hash" - "unsafe" -) - -// Make sure interfaces are correctly implemented. -var ( - _ hash.Hash = new(digest32) - _ hash.Hash32 = new(digest32) - _ bmixer = new(digest32) -) - -const ( - c1_32 uint32 = 0xcc9e2d51 - c2_32 uint32 = 0x1b873593 -) - -// digest32 represents a partial evaluation of a 32 bites hash. -type digest32 struct { - digest - h1 uint32 // Unfinalized running hash. -} - -// New32 returns new 32-bit hasher -func New32() hash.Hash32 { return New32WithSeed(0) } - -// New32WithSeed returns new 32-bit hasher set with explicit seed value -func New32WithSeed(seed uint32) hash.Hash32 { - d := new(digest32) - d.seed = seed - d.bmixer = d - d.Reset() - return d -} - -func (d *digest32) Size() int { return 4 } - -func (d *digest32) reset() { d.h1 = d.seed } - -func (d *digest32) Sum(b []byte) []byte { - h := d.Sum32() - return append(b, byte(h>>24), byte(h>>16), byte(h>>8), byte(h)) -} - -// Digest as many blocks as possible. -func (d *digest32) bmix(p []byte) (tail []byte) { - h1 := d.h1 - - nblocks := len(p) / 4 - for i := 0; i < nblocks; i++ { - k1 := *(*uint32)(unsafe.Pointer(&p[i*4])) - - k1 *= c1_32 - k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15) - k1 *= c2_32 - - h1 ^= k1 - h1 = (h1 << 13) | (h1 >> 19) // rotl32(h1, 13) - h1 = h1*4 + h1 + 0xe6546b64 - } - d.h1 = h1 - return p[nblocks*d.Size():] -} - -func (d *digest32) Sum32() (h1 uint32) { - - h1 = d.h1 - - var k1 uint32 - switch len(d.tail) & 3 { - case 3: - k1 ^= uint32(d.tail[2]) << 16 - fallthrough - case 2: - k1 ^= uint32(d.tail[1]) << 8 - fallthrough - case 1: - k1 ^= uint32(d.tail[0]) - k1 *= c1_32 - k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15) - k1 *= c2_32 - h1 ^= k1 - } - - h1 ^= uint32(d.clen) - - h1 ^= h1 >> 16 - h1 *= 0x85ebca6b - h1 ^= h1 >> 13 - h1 *= 0xc2b2ae35 - h1 ^= h1 >> 16 - - return h1 -} - -/* -func rotl32(x uint32, r byte) uint32 { - return (x << r) | (x >> (32 - r)) -} -*/ - -// Sum32 returns the MurmurHash3 sum of data. It is equivalent to the -// following sequence (without the extra burden and the extra allocation): -// hasher := New32() -// hasher.Write(data) -// return hasher.Sum32() -func Sum32(data []byte) uint32 { return Sum32WithSeed(data, 0) } - -// Sum32WithSeed returns the MurmurHash3 sum of data. 
It is equivalent to the -// following sequence (without the extra burden and the extra allocation): -// hasher := New32WithSeed(seed) -// hasher.Write(data) -// return hasher.Sum32() -func Sum32WithSeed(data []byte, seed uint32) uint32 { - - h1 := seed - - nblocks := len(data) / 4 - var p uintptr - if len(data) > 0 { - p = uintptr(unsafe.Pointer(&data[0])) - } - p1 := p + uintptr(4*nblocks) - for ; p < p1; p += 4 { - k1 := *(*uint32)(unsafe.Pointer(p)) - - k1 *= c1_32 - k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15) - k1 *= c2_32 - - h1 ^= k1 - h1 = (h1 << 13) | (h1 >> 19) // rotl32(h1, 13) - h1 = h1*4 + h1 + 0xe6546b64 - } - - tail := data[nblocks*4:] - - var k1 uint32 - switch len(tail) & 3 { - case 3: - k1 ^= uint32(tail[2]) << 16 - fallthrough - case 2: - k1 ^= uint32(tail[1]) << 8 - fallthrough - case 1: - k1 ^= uint32(tail[0]) - k1 *= c1_32 - k1 = (k1 << 15) | (k1 >> 17) // rotl32(k1, 15) - k1 *= c2_32 - h1 ^= k1 - } - - h1 ^= uint32(len(data)) - - h1 ^= h1 >> 16 - h1 *= 0x85ebca6b - h1 ^= h1 >> 13 - h1 *= 0xc2b2ae35 - h1 ^= h1 >> 16 - - return h1 -} diff --git a/vendor/github.com/spaolacci/murmur3/murmur64.go b/vendor/github.com/spaolacci/murmur3/murmur64.go deleted file mode 100644 index 65a410a..0000000 --- a/vendor/github.com/spaolacci/murmur3/murmur64.go +++ /dev/null @@ -1,57 +0,0 @@ -package murmur3 - -import ( - "hash" -) - -// Make sure interfaces are correctly implemented. -var ( - _ hash.Hash = new(digest64) - _ hash.Hash64 = new(digest64) - _ bmixer = new(digest64) -) - -// digest64 is half a digest128. -type digest64 digest128 - -// New64 returns a 64-bit hasher -func New64() hash.Hash64 { return New64WithSeed(0) } - -// New64WithSeed returns a 64-bit hasher set with explicit seed value -func New64WithSeed(seed uint32) hash.Hash64 { - d := (*digest64)(New128WithSeed(seed).(*digest128)) - return d -} - -func (d *digest64) Sum(b []byte) []byte { - h1 := d.Sum64() - return append(b, - byte(h1>>56), byte(h1>>48), byte(h1>>40), byte(h1>>32), - byte(h1>>24), byte(h1>>16), byte(h1>>8), byte(h1)) -} - -func (d *digest64) Sum64() uint64 { - h1, _ := (*digest128)(d).Sum128() - return h1 -} - -// Sum64 returns the MurmurHash3 sum of data. It is equivalent to the -// following sequence (without the extra burden and the extra allocation): -// hasher := New64() -// hasher.Write(data) -// return hasher.Sum64() -func Sum64(data []byte) uint64 { return Sum64WithSeed(data, 0) } - -// Sum64WithSeed returns the MurmurHash3 sum of data. It is equivalent to the -// following sequence (without the extra burden and the extra allocation): -// hasher := New64WithSeed(seed) -// hasher.Write(data) -// return hasher.Sum64() -func Sum64WithSeed(data []byte, seed uint32) uint64 { - d := &digest128{h1: uint64(seed), h2: uint64(seed)} - d.seed = seed - d.tail = d.bmix(data) - d.clen = len(data) - h1, _ := d.Sum128() - return h1 -} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index 4b0421c..0000000 --- a/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go deleted file mode 100644 index 41649d2..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ /dev/null @@ -1,394 +0,0 @@ -package assert - -import ( - "fmt" - "reflect" -) - -type CompareType int - -const ( - compareLess CompareType = iota - 1 - compareEqual - compareGreater -) - -var ( - intType = reflect.TypeOf(int(1)) - int8Type = reflect.TypeOf(int8(1)) - int16Type = reflect.TypeOf(int16(1)) - int32Type = reflect.TypeOf(int32(1)) - int64Type = reflect.TypeOf(int64(1)) - - uintType = reflect.TypeOf(uint(1)) - uint8Type = reflect.TypeOf(uint8(1)) - uint16Type = reflect.TypeOf(uint16(1)) - uint32Type = reflect.TypeOf(uint32(1)) - uint64Type = reflect.TypeOf(uint64(1)) - - float32Type = reflect.TypeOf(float32(1)) - float64Type = reflect.TypeOf(float64(1)) - - stringType = reflect.TypeOf("") -) - -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { - obj1Value := reflect.ValueOf(obj1) - obj2Value := reflect.ValueOf(obj2) - - // throughout this switch we try and avoid calling .Convert() if possible, - // as this has a pretty big performance impact - switch kind { - case reflect.Int: - { - intobj1, ok := obj1.(int) - if !ok { - intobj1 = obj1Value.Convert(intType).Interface().(int) - } - intobj2, ok := obj2.(int) - if !ok { - intobj2 = obj2Value.Convert(intType).Interface().(int) - } - if intobj1 > intobj2 { - return compareGreater, true - } - if intobj1 == intobj2 { - return compareEqual, true - } - if intobj1 < intobj2 { - return compareLess, true - } - } - case reflect.Int8: - { - int8obj1, ok := obj1.(int8) - if !ok { - int8obj1 = obj1Value.Convert(int8Type).Interface().(int8) - } - int8obj2, ok := obj2.(int8) - if !ok { - int8obj2 = obj2Value.Convert(int8Type).Interface().(int8) - } - if int8obj1 > int8obj2 { - return compareGreater, true - } - if int8obj1 == int8obj2 { - return compareEqual, true - } - if int8obj1 < int8obj2 { - return compareLess, true - } - } - case reflect.Int16: - { - int16obj1, ok := obj1.(int16) - if !ok { - int16obj1 = obj1Value.Convert(int16Type).Interface().(int16) - } - int16obj2, ok := obj2.(int16) - if !ok { - int16obj2 = obj2Value.Convert(int16Type).Interface().(int16) - } - if int16obj1 > int16obj2 { - return compareGreater, true - } - if int16obj1 == int16obj2 { - return compareEqual, true - } - if int16obj1 < int16obj2 { - return 
compareLess, true - } - } - case reflect.Int32: - { - int32obj1, ok := obj1.(int32) - if !ok { - int32obj1 = obj1Value.Convert(int32Type).Interface().(int32) - } - int32obj2, ok := obj2.(int32) - if !ok { - int32obj2 = obj2Value.Convert(int32Type).Interface().(int32) - } - if int32obj1 > int32obj2 { - return compareGreater, true - } - if int32obj1 == int32obj2 { - return compareEqual, true - } - if int32obj1 < int32obj2 { - return compareLess, true - } - } - case reflect.Int64: - { - int64obj1, ok := obj1.(int64) - if !ok { - int64obj1 = obj1Value.Convert(int64Type).Interface().(int64) - } - int64obj2, ok := obj2.(int64) - if !ok { - int64obj2 = obj2Value.Convert(int64Type).Interface().(int64) - } - if int64obj1 > int64obj2 { - return compareGreater, true - } - if int64obj1 == int64obj2 { - return compareEqual, true - } - if int64obj1 < int64obj2 { - return compareLess, true - } - } - case reflect.Uint: - { - uintobj1, ok := obj1.(uint) - if !ok { - uintobj1 = obj1Value.Convert(uintType).Interface().(uint) - } - uintobj2, ok := obj2.(uint) - if !ok { - uintobj2 = obj2Value.Convert(uintType).Interface().(uint) - } - if uintobj1 > uintobj2 { - return compareGreater, true - } - if uintobj1 == uintobj2 { - return compareEqual, true - } - if uintobj1 < uintobj2 { - return compareLess, true - } - } - case reflect.Uint8: - { - uint8obj1, ok := obj1.(uint8) - if !ok { - uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8) - } - uint8obj2, ok := obj2.(uint8) - if !ok { - uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8) - } - if uint8obj1 > uint8obj2 { - return compareGreater, true - } - if uint8obj1 == uint8obj2 { - return compareEqual, true - } - if uint8obj1 < uint8obj2 { - return compareLess, true - } - } - case reflect.Uint16: - { - uint16obj1, ok := obj1.(uint16) - if !ok { - uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16) - } - uint16obj2, ok := obj2.(uint16) - if !ok { - uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16) - } - if uint16obj1 > uint16obj2 { - return compareGreater, true - } - if uint16obj1 == uint16obj2 { - return compareEqual, true - } - if uint16obj1 < uint16obj2 { - return compareLess, true - } - } - case reflect.Uint32: - { - uint32obj1, ok := obj1.(uint32) - if !ok { - uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32) - } - uint32obj2, ok := obj2.(uint32) - if !ok { - uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32) - } - if uint32obj1 > uint32obj2 { - return compareGreater, true - } - if uint32obj1 == uint32obj2 { - return compareEqual, true - } - if uint32obj1 < uint32obj2 { - return compareLess, true - } - } - case reflect.Uint64: - { - uint64obj1, ok := obj1.(uint64) - if !ok { - uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64) - } - uint64obj2, ok := obj2.(uint64) - if !ok { - uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64) - } - if uint64obj1 > uint64obj2 { - return compareGreater, true - } - if uint64obj1 == uint64obj2 { - return compareEqual, true - } - if uint64obj1 < uint64obj2 { - return compareLess, true - } - } - case reflect.Float32: - { - float32obj1, ok := obj1.(float32) - if !ok { - float32obj1 = obj1Value.Convert(float32Type).Interface().(float32) - } - float32obj2, ok := obj2.(float32) - if !ok { - float32obj2 = obj2Value.Convert(float32Type).Interface().(float32) - } - if float32obj1 > float32obj2 { - return compareGreater, true - } - if float32obj1 == float32obj2 { - return compareEqual, true - } - if float32obj1 < float32obj2 { - return 
compareLess, true - } - } - case reflect.Float64: - { - float64obj1, ok := obj1.(float64) - if !ok { - float64obj1 = obj1Value.Convert(float64Type).Interface().(float64) - } - float64obj2, ok := obj2.(float64) - if !ok { - float64obj2 = obj2Value.Convert(float64Type).Interface().(float64) - } - if float64obj1 > float64obj2 { - return compareGreater, true - } - if float64obj1 == float64obj2 { - return compareEqual, true - } - if float64obj1 < float64obj2 { - return compareLess, true - } - } - case reflect.String: - { - stringobj1, ok := obj1.(string) - if !ok { - stringobj1 = obj1Value.Convert(stringType).Interface().(string) - } - stringobj2, ok := obj2.(string) - if !ok { - stringobj2 = obj2Value.Convert(stringType).Interface().(string) - } - if stringobj1 > stringobj2 { - return compareGreater, true - } - if stringobj1 == stringobj2 { - return compareEqual, true - } - if stringobj1 < stringobj2 { - return compareLess, true - } - } - } - - return compareEqual, false -} - -// Greater asserts that the first element is greater than the second -// -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") -func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) -} - -// GreaterOrEqual asserts that the first element is greater than or equal to the second -// -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") -func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) -} - -// Less asserts that the first element is less than the second -// -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") -func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) -} - -// LessOrEqual asserts that the first element is less than or equal to the second -// -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") -func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) -} - -// Positive asserts that the specified element is positive -// -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) -func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { - zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) -} - -// Negative asserts that the specified element is negative -// -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) -func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { - zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) -} - -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, 
msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - e1Kind := reflect.ValueOf(e1).Kind() - e2Kind := reflect.ValueOf(e2).Kind() - if e1Kind != e2Kind { - return Fail(t, "Elements should be the same type", msgAndArgs...) - } - - compareResult, isComparable := compare(e1, e2, e1Kind) - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) - } - - if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) - } - - return true -} - -func containsValue(values []CompareType, value CompareType) bool { - for _, v := range values { - if v == value { - return true - } - } - - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go deleted file mode 100644 index 79bd6c0..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ /dev/null @@ -1,585 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -// Conditionf uses a Comparison to assert a complex condition. -func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Condition(t, comp, append([]interface{}{msg}, args...)...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") -func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Contains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Emptyf(t, obj, "error message %s", "formatted") -func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Empty(t, object, append([]interface{}{msg}, args...)...) -} - -// Equalf asserts that two objects are equal. -// -// assert.Equalf(t, 123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. 
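A short sketch of the comparison helpers defined above (illustrative only; the test name is arbitrary and not part of the vendored testify source):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestOrdering(t *testing.T) {
        assert.Greater(t, 2, 1)
        assert.GreaterOrEqual(t, "b", "b")
        assert.Less(t, 1.0, 2.0)
        assert.Positive(t, 42)
        assert.Negative(t, -0.5)

        // Both arguments must share the same reflect.Kind; mixing e.g. int64
        // and int fails with "Elements should be the same type".
    }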
-func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") -func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") -func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Error(t, err, append([]interface{}{msg}, args...)...) -} - -// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. -func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) -} - -// ErrorIsf asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...) -} - -// Exactlyf asserts that two objects are equal in value and type. -// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") -func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Failf reports a failure through -func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// FailNowf fails test -func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// Falsef asserts that the specified value is false. -// -// assert.Falsef(t, myBool, "error message %s", "formatted") -func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return False(t, value, append([]interface{}{msg}, args...)...) 
-} - -// Greaterf asserts that the first element is greater than the second -// -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") -func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// GreaterOrEqualf asserts that the first element is greater than or equal to the second -// -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") -func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") -func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") -func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) 
-} - -// IsDecreasingf asserts that the collection is decreasing -// -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") -func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsDecreasing(t, object, append([]interface{}{msg}, args...)...) -} - -// IsIncreasingf asserts that the collection is increasing -// -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") -func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsIncreasing(t, object, append([]interface{}{msg}, args...)...) -} - -// IsNonDecreasingf asserts that the collection is not decreasing -// -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") -func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...) -} - -// IsNonIncreasingf asserts that the collection is not increasing -// -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") -func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) -} - -// IsTypef asserts that the specified objects are of the same type. -func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) -} - -// JSONEqf asserts that two JSON strings are equivalent. -// -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Len(t, object, length, append([]interface{}{msg}, args...)...) 
-} - -// Lessf asserts that the first element is less than the second -// -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") -func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Less(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// LessOrEqualf asserts that the first element is less than or equal to the second -// -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") -func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) -} - -// Negativef asserts that the specified element is negative -// -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") -func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Negative(t, e, append([]interface{}{msg}, args...)...) -} - -// Nilf asserts that the specified object is nil. -// -// assert.Nilf(t, err, "error message %s", "formatted") -func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Nil(t, object, append([]interface{}{msg}, args...)...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoError(t, err, append([]interface{}{msg}, args...)...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") -func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEmpty(t, object, append([]interface{}{msg}, args...)...) -} - -// NotEqualf asserts that the specified values are NOT equal. 
-// -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") -func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotErrorIsf asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) -} - -// NotNilf asserts that the specified object is not nil. -// -// assert.NotNilf(t, err, "error message %s", "formatted") -func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotNil(t, object, append([]interface{}{msg}, args...)...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") -func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotPanics(t, f, append([]interface{}{msg}, args...)...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") -func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// NotZerof asserts that i is not the zero value for its type. 
-func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotZero(t, i, append([]interface{}{msg}, args...)...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") -func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Panics(t, f, append([]interface{}{msg}, args...)...) -} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) -} - -// Positivef asserts that the specified element is positive -// -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") -func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Positive(t, e, append([]interface{}{msg}, args...)...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") -func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// Samef asserts that two pointers reference the same object. -// -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Same(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Subset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// Truef asserts that the specified value is true. 
-// -// assert.Truef(t, myBool, "error message %s", "formatted") -func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return True(t, value, append([]interface{}{msg}, args...)...) -} - -// Zerof asserts that i is the zero value for its type. -func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Zero(t, i, append([]interface{}{msg}, args...)...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl deleted file mode 100644 index d2bb0b8..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentFormat}} -func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { - if h, ok := t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index 127fa7e..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,1164 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Condition(a.t, comp, msgAndArgs...) -} - -// Conditionf uses a Comparison to assert a complex condition. -func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Conditionf(a.t, comp, msg, args...) -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Contains(a.t, s, contains, msgAndArgs...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") -func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Containsf(a.t, s, contains, msg, args...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) -func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(a.t, listA, listB, msgAndArgs...) 
-} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatchf(a.t, listA, listB, msg, args...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Empty(a.t, object, msgAndArgs...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Emptyf(obj, "error message %s", "formatted") -func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Emptyf(a.t, object, msg, args...) -} - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") -func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualErrorf(a.t, theError, errString, msg, args...) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123)) -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") -func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValuesf(a.t, expected, actual, msg, args...) -} - -// Equalf asserts that two objects are equal. 
-// -// a.Equalf(123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equalf(a.t, expected, actual, msg, args...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Error(a.t, err, msgAndArgs...) -} - -// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. -func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorAs(a.t, err, target, msgAndArgs...) -} - -// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. -func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorAsf(a.t, err, target, msg, args...) -} - -// ErrorIs asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorIs(a.t, err, target, msgAndArgs...) -} - -// ErrorIsf asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ErrorIsf(a.t, err, target, msg, args...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Errorf(a.t, err, msg, args...) -} - -// Exactly asserts that two objects are equal in value and type. -// -// a.Exactly(int32(123), int64(123)) -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// Exactlyf asserts that two objects are equal in value and type. -// -// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") -func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactlyf(a.t, expected, actual, msg, args...) -} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Fail(a.t, failureMessage, msgAndArgs...) 
-} - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNow(a.t, failureMessage, msgAndArgs...) -} - -// FailNowf fails test -func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNowf(a.t, failureMessage, msg, args...) -} - -// Failf reports a failure through -func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Failf(a.t, failureMessage, msg, args...) -} - -// False asserts that the specified value is false. -// -// a.False(myBool) -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return False(a.t, value, msgAndArgs...) -} - -// Falsef asserts that the specified value is false. -// -// a.Falsef(myBool, "error message %s", "formatted") -func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Falsef(a.t, value, msg, args...) -} - -// Greater asserts that the first element is greater than the second -// -// a.Greater(2, 1) -// a.Greater(float64(2), float64(1)) -// a.Greater("b", "a") -func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Greater(a.t, e1, e2, msgAndArgs...) -} - -// GreaterOrEqual asserts that the first element is greater than or equal to the second -// -// a.GreaterOrEqual(2, 1) -// a.GreaterOrEqual(2, 2) -// a.GreaterOrEqual("b", "a") -// a.GreaterOrEqual("b", "b") -func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) -} - -// GreaterOrEqualf asserts that the first element is greater than or equal to the second -// -// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") -// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") -// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") -// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") -func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return GreaterOrEqualf(a.t, e1, e2, msg, args...) -} - -// Greaterf asserts that the first element is greater than the second -// -// a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") -// a.Greaterf("b", "a", "error message %s", "formatted") -func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Greaterf(a.t, e1, e2, msg, args...) -} - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject)) -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// Implementsf asserts that an object is implemented by the specified interface. 
-// -// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") -func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implementsf(a.t, interfaceObject, object, msg, args...) -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// a.InDelta(math.Pi, 22/7.0, 0.01) -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") -func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaf(a.t, expected, actual, delta, msg, args...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. 
-func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) -} - -// IsDecreasing asserts that the collection is decreasing -// -// a.IsDecreasing([]int{2, 1, 0}) -// a.IsDecreasing([]float{2, 1}) -// a.IsDecreasing([]string{"b", "a"}) -func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsDecreasing(a.t, object, msgAndArgs...) -} - -// IsDecreasingf asserts that the collection is decreasing -// -// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") -// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") -func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsDecreasingf(a.t, object, msg, args...) -} - -// IsIncreasing asserts that the collection is increasing -// -// a.IsIncreasing([]int{1, 2, 3}) -// a.IsIncreasing([]float{1, 2}) -// a.IsIncreasing([]string{"a", "b"}) -func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsIncreasing(a.t, object, msgAndArgs...) -} - -// IsIncreasingf asserts that the collection is increasing -// -// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") -// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") -func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsIncreasingf(a.t, object, msg, args...) -} - -// IsNonDecreasing asserts that the collection is not decreasing -// -// a.IsNonDecreasing([]int{1, 1, 2}) -// a.IsNonDecreasing([]float{1, 2}) -// a.IsNonDecreasing([]string{"a", "b"}) -func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonDecreasing(a.t, object, msgAndArgs...) -} - -// IsNonDecreasingf asserts that the collection is not decreasing -// -// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") -func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonDecreasingf(a.t, object, msg, args...) 
-} - -// IsNonIncreasing asserts that the collection is not increasing -// -// a.IsNonIncreasing([]int{2, 1, 1}) -// a.IsNonIncreasing([]float{2, 1}) -// a.IsNonIncreasing([]string{"b", "a"}) -func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonIncreasing(a.t, object, msgAndArgs...) -} - -// IsNonIncreasingf asserts that the collection is not increasing -// -// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") -func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsNonIncreasingf(a.t, object, msg, args...) -} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// IsTypef asserts that the specified objects are of the same type. -func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsTypef(a.t, expectedType, object, msg, args...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - -// JSONEqf asserts that two JSON strings are equivalent. -// -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEqf(a.t, expected, actual, msg, args...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3) -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Len(a.t, object, length, msgAndArgs...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// a.Lenf(mySlice, 3, "error message %s", "formatted") -func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lenf(a.t, object, length, msg, args...) -} - -// Less asserts that the first element is less than the second -// -// a.Less(1, 2) -// a.Less(float64(1), float64(2)) -// a.Less("a", "b") -func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Less(a.t, e1, e2, msgAndArgs...) 
-} - -// LessOrEqual asserts that the first element is less than or equal to the second -// -// a.LessOrEqual(1, 2) -// a.LessOrEqual(2, 2) -// a.LessOrEqual("a", "b") -// a.LessOrEqual("b", "b") -func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return LessOrEqual(a.t, e1, e2, msgAndArgs...) -} - -// LessOrEqualf asserts that the first element is less than or equal to the second -// -// a.LessOrEqualf(1, 2, "error message %s", "formatted") -// a.LessOrEqualf(2, 2, "error message %s", "formatted") -// a.LessOrEqualf("a", "b", "error message %s", "formatted") -// a.LessOrEqualf("b", "b", "error message %s", "formatted") -func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return LessOrEqualf(a.t, e1, e2, msg, args...) -} - -// Lessf asserts that the first element is less than the second -// -// a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1), float64(2), "error message %s", "formatted") -// a.Lessf("a", "b", "error message %s", "formatted") -func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lessf(a.t, e1, e2, msg, args...) -} - -// Negative asserts that the specified element is negative -// -// a.Negative(-1) -// a.Negative(-1.23) -func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Negative(a.t, e, msgAndArgs...) -} - -// Negativef asserts that the specified element is negative -// -// a.Negativef(-1, "error message %s", "formatted") -// a.Negativef(-1.23, "error message %s", "formatted") -func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Negativef(a.t, e, msg, args...) -} - -// Nil asserts that the specified object is nil. -// -// a.Nil(err) -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nil(a.t, object, msgAndArgs...) -} - -// Nilf asserts that the specified object is nil. -// -// a.Nilf(err, "error message %s", "formatted") -func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nilf(a.t, object, msg, args...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoError(a.t, err, msgAndArgs...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoErrorf(a.t, err, msg, args...) -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. 
-// -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") -func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContainsf(a.t, s, contains, msg, args...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmptyf(a.t, object, msg, args...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValues(obj1, obj2) -func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValues(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") -func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValuesf(a.t, expected, actual, msg, args...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualf(a.t, expected, actual, msg, args...) 
-} - -// NotErrorIs asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotErrorIs(a.t, err, target, msgAndArgs...) -} - -// NotErrorIsf asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotErrorIsf(a.t, err, target, msg, args...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err) -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNil(a.t, object, msgAndArgs...) -} - -// NotNilf asserts that the specified object is not nil. -// -// a.NotNilf(err, "error message %s", "formatted") -func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNilf(a.t, object, msg, args...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ RemainCalm() }) -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") -func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanicsf(a.t, f, msg, args...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") -func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexpf(a.t, rx, str, msg, args...) -} - -// NotSame asserts that two pointers do not reference the same object. -// -// a.NotSame(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSame(a.t, expected, actual, msgAndArgs...) -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. 
-func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSamef(a.t, expected, actual, msg, args...) -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubset(a.t, list, subset, msgAndArgs...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubsetf(a.t, list, subset, msg, args...) -} - -// NotZero asserts that i is not the zero value for its type. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZero(a.t, i, msgAndArgs...) -} - -// NotZerof asserts that i is not the zero value for its type. -func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZerof(a.t, i, msg, args...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ GoCrazy() }) -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panics(a.t, f, msgAndArgs...) -} - -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithError(a.t, errString, f, msgAndArgs...) -} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithErrorf(a.t, errString, f, msg, args...) -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(a.t, expected, f, msgAndArgs...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. 
-// -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValuef(a.t, expected, f, msg, args...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panicsf(a.t, f, msg, args...) -} - -// Positive asserts that the specified element is positive -// -// a.Positive(1) -// a.Positive(1.23) -func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Positive(a.t, e, msgAndArgs...) -} - -// Positivef asserts that the specified element is positive -// -// a.Positivef(1, "error message %s", "formatted") -// a.Positivef(1.23, "error message %s", "formatted") -func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Positivef(a.t, e, msg, args...) -} - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") -func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexpf(a.t, rx, str, msg, args...) -} - -// Same asserts that two pointers reference the same object. -// -// a.Same(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Same(a.t, expected, actual, msgAndArgs...) -} - -// Samef asserts that two pointers reference the same object. -// -// a.Samef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Samef(a.t, expected, actual, msg, args...) -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subset(a.t, list, subset, msgAndArgs...) -} - -// Subsetf asserts that the specified list(array, slice...) 
contains all -// elements given in the specified subset(array, slice...). -// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subsetf(a.t, list, subset, msg, args...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool) -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return True(a.t, value, msgAndArgs...) -} - -// Truef asserts that the specified value is true. -// -// a.Truef(myBool, "error message %s", "formatted") -func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Truef(a.t, value, msg, args...) -} - -// Zero asserts that i is the zero value for its type. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zero(a.t, i, msgAndArgs...) -} - -// Zerof asserts that i is the zero value for its type. -func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zerof(a.t, i, msg, args...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 188bb9e..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - if h, ok := a.t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go deleted file mode 100644 index 1c3b471..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ /dev/null @@ -1,81 +0,0 @@ -package assert - -import ( - "fmt" - "reflect" -) - -// isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { - objKind := reflect.TypeOf(object).Kind() - if objKind != reflect.Slice && objKind != reflect.Array { - return false - } - - objValue := reflect.ValueOf(object) - objLen := objValue.Len() - - if objLen <= 1 { - return true - } - - value := objValue.Index(0) - valueInterface := value.Interface() - firstValueKind := value.Kind() - - for i := 1; i < objLen; i++ { - prevValue := value - prevValueInterface := valueInterface - - value = objValue.Index(i) - valueInterface = value.Interface() - - compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) - - if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) - } - - if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...) 
- } - } - - return true -} - -// IsIncreasing asserts that the collection is increasing -// -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) -func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) -} - -// IsNonIncreasing asserts that the collection is not increasing -// -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) -func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) -} - -// IsDecreasing asserts that the collection is decreasing -// -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) -func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) -} - -// IsNonDecreasing asserts that the collection is not decreasing -// -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) -func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index b3fef17..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1592 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "regexp" - "runtime" - "runtime/debug" - "strings" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful -// for table driven tests. -type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool - -// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful -// for table driven tests. -type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool - -// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful -// for table driven tests. -type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool - -// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful -// for table driven tests. -type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool - -// Comparison is a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. 
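// Illustrative sketch, not from the vendored source: the "[]float{...}" literals
// in the doc comments above are pseudo-code (Go has no []float type); with
// concrete element types the ordering helpers read as follows. Assumes the
// usual "testing" and testify assert imports.
func sketchOrderingAssertions(t *testing.T) {
	assert.IsIncreasing(t, []int{1, 2, 3})
	assert.IsNonDecreasing(t, []float64{1.0, 1.0, 2.5})
	assert.IsDecreasing(t, []string{"c", "b", "a"})
	assert.IsNonIncreasing(t, []int{2, 1, 1})
}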
-// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - - exp, ok := expected.([]byte) - if !ok { - return reflect.DeepEqual(expected, actual) - } - - act, ok := actual.([]byte) - if !ok { - return false - } - if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. -func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - var pc uintptr - var ok bool - var file string - var line int - var name string - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - if len(parts) > 1 { - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. 
-func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - r, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(r) -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - msg := msgAndArgs[0] - if msgAsStr, ok := msg.(string); ok { - return msgAsStr - } - return fmt.Sprintf("%+v", msg) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the -// basis on which the alignment occurs). -func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, - {"Error", failureMessage}, - } - - // Add test name if the Go version supports it - if n, ok := t.(interface { - Name() string - }); ok { - content = append(content, labeledContent{"Test", n.Name()}) - } - - message := messageFromMsgAndArgs(msgAndArgs...) - if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\n%s", ""+labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: -// -// \t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. 
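// Illustrative sketch, not from the vendored source: per messageFromMsgAndArgs
// above, the trailing msgAndArgs parameter of every assertion is either a
// single message value or a fmt-style format string followed by its arguments.
// The values ok, status and user below are hypothetical.
func sketchMsgAndArgs(t *testing.T, ok bool, status int, user string) {
	assert.True(t, ok, "handler should accept the request")               // one plain message
	assert.True(t, ok, "unexpected status %d for user %q", status, user)  // format string + args
}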
-// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) - } - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. -func validateEqualArgs(expected, actual interface{}) error { - if expected == nil && actual == nil { - return nil - } - - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - -// Same asserts that two pointers reference the same object. -// -// assert.Same(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !samePointers(expected, actual) { - return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) 
- } - - return true -} - -// NotSame asserts that two pointers do not reference the same object. -// -// assert.NotSame(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if samePointers(expected, actual) { - return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) - } - return true -} - -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { - firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) - if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false - } - - firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) - if firstType != secondType { - return false - } - - // compare pointer addresses - return first == second -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. -// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar -// to a type conversion in the Go grammar. -func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)), - fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual)) - } - return truncatingFormat(expected), truncatingFormat(actual) -} - -// truncatingFormat formats the data and truncates it if it's too long. -// -// This helps keep formatted error messages lines from exceeding the -// bufio.MaxScanTokenSize max line length that the go testing framework imposes. -func truncatingFormat(data interface{}) string { - value := fmt.Sprintf("%#v", data) - max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed. - if len(value) > max { - value = value[0:max] + "<... truncated>" - } - return value -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123)) -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal in value and type. -// -// assert.Exactly(t, int32(123), int64(123)) -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. 
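// Illustrative sketch, not from the vendored source: how the equality variants
// defined above differ. Assumes the usual "testing" and testify assert imports.
func sketchEqualityVariants(t *testing.T) {
	assert.Equal(t, 123, 123)                      // deep equality of like-typed values
	assert.EqualValues(t, uint32(123), int32(123)) // equal after type conversion
	assert.Exactly(t, int32(123), int32(123))      // value and type must both match
	assert.IsType(t, "", "hello")                  // same dynamic type
	p := &struct{ n int }{n: 1}
	assert.Same(t, p, p)                           // same pointer, not merely equal values
}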
-// -// assert.NotNil(t, err) -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if !isNil(object) { - return true - } - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, - kind) - - if isNilableKind && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err) -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - // get nil case out of the way - if object == nil { - return true - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - pass := isEmpty(object) - if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - pass := !isEmpty(object) - if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. 
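// Illustrative sketch, not from the vendored source: per isEmpty above, "empty"
// means nil, a zero value, or a zero-length collection. Assumes the usual
// "testing" and testify assert imports.
func sketchEmptiness(t *testing.T) {
	assert.Empty(t, "")                        // zero value of string
	assert.Empty(t, []int(nil))                // nil slice has length 0
	assert.Empty(t, 0)                         // zero value of int
	assert.NotEmpty(t, map[string]int{"a": 1}) // non-empty map
	assert.Nil(t, error(nil))                  // nil interface value
	assert.Len(t, []string{"a", "b", "c"}, 3)  // length via the builtin len
}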
-// -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool) -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if !value { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool) -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if value { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValues(t, obj1, obj2) -func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if listKind == reflect.String { - elementValue := reflect.ValueOf(element) - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if listKind == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. 
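// Illustrative sketch, not from the vendored source: the bracketed literals in
// the Contains/NotContains doc comments are pseudo-code; with real Go values
// the helpers work on strings, slices/arrays and map keys, as implemented by
// includeElement above. Assumes the usual "testing" and testify assert imports.
func sketchContains(t *testing.T) {
	assert.Contains(t, "Hello World", "World")                       // substring
	assert.Contains(t, []string{"Hello", "World"}, "World")          // slice element
	assert.Contains(t, map[string]string{"Hello": "World"}, "Hello") // map key, not value
	assert.NotContains(t, []string{"Hello", "World"}, "Earth")
}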
-// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return true // we consider nil to be equal to the nil set - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) - } - } - - return true -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) 
- } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) -func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isEmpty(listA) && isEmpty(listB) { - return true - } - - if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) { - return false - } - - extraA, extraB := diffLists(listA, listB) - - if len(extraA) == 0 && len(extraB) == 0 { - return true - } - - return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...) -} - -// isList checks that the provided value is array or slice. -func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) { - kind := reflect.TypeOf(list).Kind() - if kind != reflect.Array && kind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind), - msgAndArgs...) - } - return true -} - -// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B. -// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and -// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored. -func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) { - aValue := reflect.ValueOf(listA) - bValue := reflect.ValueOf(listB) - - aLen := aValue.Len() - bLen := bValue.Len() - - // Mark indexes in bValue that we already used - visited := make([]bool, bLen) - for i := 0; i < aLen; i++ { - element := aValue.Index(i).Interface() - found := false - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - if ObjectsAreEqual(bValue.Index(j).Interface(), element) { - visited[j] = true - found = true - break - } - } - if !found { - extraA = append(extraA, element) - } - } - - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - extraB = append(extraB, bValue.Index(j).Interface()) - } - - return -} - -func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string { - var msg bytes.Buffer - - msg.WriteString("elements differ") - if len(extraA) > 0 { - msg.WriteString("\n\nextra elements in list A:\n") - msg.WriteString(spewConfig.Sdump(extraA)) - } - if len(extraB) > 0 { - msg.WriteString("\n\nextra elements in list B:\n") - msg.WriteString(spewConfig.Sdump(extraB)) - } - msg.WriteString("\n\nlistA:\n") - msg.WriteString(spewConfig.Sdump(listA)) - msg.WriteString("\n\nlistB:\n") - msg.WriteString(spewConfig.Sdump(listB)) - - return msg.String() -} - -// Condition uses a Comparison to assert a complex condition. 
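// Illustrative sketch, not from the vendored source: Subset, NotSubset and
// ElementsMatch with concrete slices (the bracketed lists in their doc comments
// are pseudo-code). Assumes the usual "testing" and testify assert imports.
func sketchCollections(t *testing.T) {
	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})
	assert.NotSubset(t, []int{1, 3, 4}, []int{1, 2})
	assert.ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2}) // order ignored, duplicate counts must match
}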
-func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() - - }() - - return didPanic, message, stack - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ GoCrazy() }) -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue, panickedStack := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue, panickedStack := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - panicErr, ok := panicValue.(error) - if !ok || panicErr.Error() != errString { - return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ RemainCalm() }) -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...) 
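// Illustrative sketch, not from the vendored source: the panic assertions above
// in typical use. Assumes "errors", "testing" and the testify assert imports.
func sketchPanics(t *testing.T) {
	assert.Panics(t, func() { panic("boom") })
	assert.PanicsWithValue(t, "boom", func() { panic("boom") })
	assert.PanicsWithError(t, "boom", func() { panic(errors.New("boom")) })
	assert.NotPanics(t, func() {})
}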
- } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint: - xf = float64(xn) - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = xn - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) - if !result { - return result - } - } - - return true -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Map || - reflect.TypeOf(expected).Kind() != reflect.Map { - return Fail(t, "Arguments must be maps", msgAndArgs...) - } - - expectedMap := reflect.ValueOf(expected) - actualMap := reflect.ValueOf(actual) - - if expectedMap.Len() != actualMap.Len() { - return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) - } - - for _, k := range expectedMap.MapKeys() { - ev := expectedMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !ev.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) - } - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) 
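// Illustrative sketch, not from the vendored source: InDelta checks an absolute
// difference, the slice/map variants apply the same check element by element,
// and InEpsilon (defined just below) checks a relative error instead. Assumes
// "math", "testing" and the testify assert imports.
func sketchNumericTolerance(t *testing.T) {
	assert.InDelta(t, math.Pi, 22.0/7.0, 0.01)
	assert.InDeltaSlice(t, []float64{1.0, 2.0}, []float64{1.001, 1.999}, 0.01)
	assert.InDeltaMapValues(t, map[string]float64{"x": 1.0}, map[string]float64{"x": 1.0005}, 0.01)
	assert.InEpsilon(t, 100.0, 101.0, 0.02) // relative error here is 0.01
}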
- } - - if !InDelta( - t, - ev.Interface(), - av.Interface(), - delta, - msgAndArgs..., - ) { - return false - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if math.IsNaN(af) { - return 0, errors.New("expected value must not be NaN") - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } - if math.IsNaN(bf) { - return 0, errors.New("actual value must not be NaN") - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") - } - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err != nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err == nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) 
{ - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "actual : %q", expected, actual), msgAndArgs...) - } - return true -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) 
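// Illustrative sketch, not from the vendored source: typical use of the error,
// regexp, zero-value and JSON helpers defined above. Assumes "errors",
// "testing" and the testify assert imports.
func sketchErrorAndJSON(t *testing.T) {
	err := errors.New("boom")
	assert.Error(t, err)
	assert.EqualError(t, err, "boom")
	assert.NoError(t, nil)
	assert.Regexp(t, `^\d+$`, "12345")
	assert.Zero(t, "")   // "" is the zero value for string
	assert.NotZero(t, 42)
	assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
}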
-} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -func isFunction(arg interface{}) bool { - if arg == nil { - return false - } - return reflect.TypeOf(arg).Kind() == reflect.Func -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - DisableMethods: true, - MaxDepth: 10, -} - -type tHelper interface { - Helper() -} - -// ErrorIs asserts that at least one of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if errors.Is(err, target) { - return true - } - - var expectedText string - if target != nil { - expectedText = target.Error() - } - - chain := buildErrorChainString(err) - - return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ - "expected: %q\n"+ - "in chain: %s", expectedText, chain, - ), msgAndArgs...) -} - -// NotErrorIs asserts that at none of the errors in err's chain matches target. -// This is a wrapper for errors.Is. -func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !errors.Is(err, target) { - return true - } - - var expectedText string - if target != nil { - expectedText = target.Error() - } - - chain := buildErrorChainString(err) - - return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", expectedText, chain, - ), msgAndArgs...) -} - -// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value. -// This is a wrapper for errors.As. -func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if errors.As(err, target) { - return true - } - - chain := buildErrorChainString(err) - - return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, - ), msgAndArgs...) 
-} - -func buildErrorChainString(err error) string { - if err == nil { - return "" - } - - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) - } - return chain -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4..0000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. -// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d..0000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index df189d2..0000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. 
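// Illustrative sketch, not from the vendored source: AnError above is intended
// for tests that only care that some error is propagated; ErrorIs/NotErrorIs
// (defined earlier in assertions.go) then inspect the wrap chain. Assumes
// "fmt", "io", "testing" and the testify assert imports.
func sketchErrorChain(t *testing.T) {
	err := fmt.Errorf("loading config: %w", assert.AnError)
	assert.ErrorIs(t, err, assert.AnError) // found via errors.Is through the %w chain
	assert.NotErrorIs(t, err, io.EOF)      // io.EOF is not in the chain
}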
-func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/whyrusleeping/cbor-gen/LICENSE b/vendor/github.com/whyrusleeping/cbor-gen/LICENSE deleted file mode 100644 index 747f056..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 Jeromy Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/whyrusleeping/cbor-gen/Makefile b/vendor/github.com/whyrusleeping/cbor-gen/Makefile deleted file mode 100644 index cbb8d39..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -gentest: - rm -rf ./testing/cbor_gen.go ./testing/cbor_map_gen.go - go run ./testgen/main.go -.PHONY: gentest - -test: gentest - go test ./... -.PHONY: test diff --git a/vendor/github.com/whyrusleeping/cbor-gen/README.md b/vendor/github.com/whyrusleeping/cbor-gen/README.md deleted file mode 100644 index 93603a9..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# cbor-gen - -Some basic utilities to generate fast path cbor codecs for your types. 
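The README and Makefile above only hint at how the fast-path codecs are produced (the Makefile runs a small generator program). As a rough sketch of that workflow — the gen.WriteTupleEncodersToFile entry point in the gen subpackage is an assumption about the library's API and does not appear in this diff — a throwaway program is run via go:generate or make to emit cbor_gen.go for the listed types:

//go:build ignore

// Hypothetical generator program; in a real project the structs live in the
// target package and are imported here rather than declared inline.
package main

import (
	"github.com/whyrusleeping/cbor-gen/gen"
)

// BlockHeader is purely illustrative; any exported struct works.
type BlockHeader struct {
	Height uint64
	Miner  string
}

func main() {
	// Assumed entry point: writes MarshalCBOR/UnmarshalCBOR methods for the
	// listed types into cbor_gen.go, declared in package "types".
	if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "types", BlockHeader{}); err != nil {
		panic(err)
	}
}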
- -## License -MIT diff --git a/vendor/github.com/whyrusleeping/cbor-gen/cbor_cid.go b/vendor/github.com/whyrusleeping/cbor-gen/cbor_cid.go deleted file mode 100644 index 57d6ec2..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/cbor_cid.go +++ /dev/null @@ -1,22 +0,0 @@ -package typegen - -import ( - "io" - - cid "github.com/ipfs/go-cid" -) - -type CborCid cid.Cid - -func (c CborCid) MarshalCBOR(w io.Writer) error { - return WriteCid(w, cid.Cid(c)) -} - -func (c *CborCid) UnmarshalCBOR(r io.Reader) error { - oc, err := ReadCid(r) - if err != nil { - return err - } - *c = CborCid(oc) - return nil -} diff --git a/vendor/github.com/whyrusleeping/cbor-gen/io.go b/vendor/github.com/whyrusleeping/cbor-gen/io.go deleted file mode 100644 index 2eb7d19..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/io.go +++ /dev/null @@ -1,81 +0,0 @@ -package typegen - -import ( - "io" -) - -var ( - _ io.Reader = (*CborReader)(nil) - _ io.ByteScanner = (*CborReader)(nil) -) - -type CborReader struct { - r BytePeeker - hbuf []byte -} - -func NewCborReader(r io.Reader) *CborReader { - if r, ok := r.(*CborReader); ok { - return r - } - - return &CborReader{ - r: GetPeeker(r), - hbuf: make([]byte, maxHeaderSize), - } -} - -func (cr *CborReader) Read(p []byte) (n int, err error) { - return cr.r.Read(p) -} - -func (cr *CborReader) ReadByte() (byte, error) { - return cr.r.ReadByte() -} - -func (cr *CborReader) UnreadByte() error { - return cr.r.UnreadByte() -} - -func (cr *CborReader) ReadHeader() (byte, uint64, error) { - return CborReadHeaderBuf(cr.r, cr.hbuf) -} - -var ( - _ io.Writer = (*CborWriter)(nil) - _ io.StringWriter = (*CborWriter)(nil) -) - -type CborWriter struct { - w io.Writer - hbuf []byte -} - -func NewCborWriter(w io.Writer) *CborWriter { - if w, ok := w.(*CborWriter); ok { - return w - } - return &CborWriter{ - w: w, - hbuf: make([]byte, maxHeaderSize), - } -} - -func (cw *CborWriter) Write(p []byte) (n int, err error) { - return cw.w.Write(p) -} - -func (cw *CborWriter) WriteMajorTypeHeader(t byte, l uint64) error { - return WriteMajorTypeHeaderBuf(cw.hbuf, cw.w, t, l) -} - -func (cw *CborWriter) CborWriteHeader(t byte, l uint64) error { - return WriteMajorTypeHeaderBuf(cw.hbuf, cw.w, t, l) -} - -func (cw *CborWriter) WriteString(s string) (int, error) { - if sw, ok := cw.w.(io.StringWriter); ok { - return sw.WriteString(s) - } - return cw.w.Write([]byte(s)) -} diff --git a/vendor/github.com/whyrusleeping/cbor-gen/package.go b/vendor/github.com/whyrusleeping/cbor-gen/package.go deleted file mode 100644 index c8b3cf3..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/package.go +++ /dev/null @@ -1,113 +0,0 @@ -package typegen - -import ( - "fmt" - "math/big" - "reflect" - "sort" - "strings" - "sync" - - cid "github.com/ipfs/go-cid" -) - -const MaxLength = 8192 - -const ByteArrayMaxLen = 2 << 20 - -var ( - cidType = reflect.TypeOf(cid.Cid{}) - bigIntType = reflect.TypeOf(big.Int{}) - deferredType = reflect.TypeOf(Deferred{}) -) - -var ( - knownPackageNamesMu sync.Mutex - pkgNameToPkgPath = make(map[string]string) - pkgPathToPkgName = make(map[string]string) - - defaultImports = []Import{ - {Name: "cbg", PkgPath: "github.com/whyrusleeping/cbor-gen"}, - {Name: "xerrors", PkgPath: "golang.org/x/xerrors"}, - {Name: "cid", PkgPath: "github.com/ipfs/go-cid"}, - } -) - -func init() { - for _, imp := range defaultImports { - if was, conflict := pkgNameToPkgPath[imp.Name]; conflict { - panic(fmt.Sprintf("reused pkg name %s for %s and %s", imp.Name, imp.PkgPath, was)) - } - if _, conflict := 
pkgPathToPkgName[imp.Name]; conflict { - panic(fmt.Sprintf("duplicate default import %s", imp.PkgPath)) - } - pkgNameToPkgPath[imp.Name] = imp.PkgPath - pkgPathToPkgName[imp.PkgPath] = imp.Name - } -} - -func resolvePkgName(path, typeName string) string { - parts := strings.Split(typeName, ".") - if len(parts) != 2 { - panic(fmt.Sprintf("expected type to have a package name: %s", typeName)) - } - defaultName := parts[0] - - knownPackageNamesMu.Lock() - defer knownPackageNamesMu.Unlock() - - // Check for a known name and use it. - if name, ok := pkgPathToPkgName[path]; ok { - return name - } - - // Allocate a name. - for i := 0; ; i++ { - tryName := defaultName - if i > 0 { - tryName = fmt.Sprintf("%s%d", defaultName, i) - } - if _, taken := pkgNameToPkgPath[tryName]; !taken { - pkgNameToPkgPath[tryName] = path - pkgPathToPkgName[path] = tryName - return tryName - } - } - -} - -type Import struct { - Name, PkgPath string -} - -func ImportsForType(currPkg string, t reflect.Type) []Import { - switch t.Kind() { - case reflect.Array, reflect.Slice, reflect.Ptr: - return ImportsForType(currPkg, t.Elem()) - case reflect.Map: - return dedupImports(append(ImportsForType(currPkg, t.Key()), ImportsForType(currPkg, t.Elem())...)) - default: - path := t.PkgPath() - if path == "" || path == currPkg { - // built-in or in current package. - return nil - } - - return []Import{{PkgPath: path, Name: resolvePkgName(path, t.String())}} - } -} - -func dedupImports(imps []Import) []Import { - impSet := make(map[string]string, len(imps)) - for _, imp := range imps { - impSet[imp.PkgPath] = imp.Name - } - deduped := make([]Import, 0, len(imps)) - for pkg, name := range impSet { - deduped = append(deduped, Import{Name: name, PkgPath: pkg}) - } - sort.Slice(deduped, func(i, j int) bool { - return deduped[i].PkgPath < deduped[j].PkgPath - }) - return deduped -} diff --git a/vendor/github.com/whyrusleeping/cbor-gen/peeker.go b/vendor/github.com/whyrusleeping/cbor-gen/peeker.go deleted file mode 100644 index 3600a84..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/peeker.go +++ /dev/null @@ -1,80 +0,0 @@ -package typegen - -import ( - "bufio" - "io" -) - -// BytePeeker combines the Reader and ByteScanner interfaces. -type BytePeeker interface { - io.Reader - io.ByteScanner -} - -func GetPeeker(r io.Reader) BytePeeker { - if r, ok := r.(BytePeeker); ok { - return r - } - return &peeker{reader: r} -} - -// peeker is a non-buffering BytePeeker. -type peeker struct { - reader io.Reader - peekState int - lastByte byte -} - -const ( - peekEmpty = iota - peekSet - peekUnread -) - -func (p *peeker) Read(buf []byte) (n int, err error) { - // Read "nothing". I.e., read an error, maybe. 
- if len(buf) == 0 { - // There's something pending in the - if p.peekState == peekUnread { - return 0, nil - } - return p.reader.Read(nil) - } - - if p.peekState == peekUnread { - buf[0] = p.lastByte - n, err = p.reader.Read(buf[1:]) - n += 1 - } else { - n, err = p.reader.Read(buf) - } - if n > 0 { - p.peekState = peekSet - p.lastByte = buf[n-1] - } - return n, err -} - -func (p *peeker) ReadByte() (byte, error) { - if p.peekState == peekUnread { - p.peekState = peekSet - return p.lastByte, nil - } - var buf [1]byte - _, err := io.ReadFull(p.reader, buf[:]) - if err != nil { - return 0, err - } - b := buf[0] - p.lastByte = b - p.peekState = peekSet - return b, nil -} - -func (p *peeker) UnreadByte() error { - if p.peekState != peekSet { - return bufio.ErrInvalidUnreadByte - } - p.peekState = peekUnread - return nil -} diff --git a/vendor/github.com/whyrusleeping/cbor-gen/utils.go b/vendor/github.com/whyrusleeping/cbor-gen/utils.go deleted file mode 100644 index 1a43b89..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/utils.go +++ /dev/null @@ -1,750 +0,0 @@ -package typegen - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sync" - - cid "github.com/ipfs/go-cid" -) - -const ( - maxCidLength = 100 - maxHeaderSize = 9 -) - -// discard is a helper function to discard data from a reader, special-casing -// the most common readers we encounter in this library for a significant -// performance boost. -func discard(br io.Reader, n int) error { - // If we're expecting no bytes, don't even try to read. Otherwise, we may read an EOF. - if n == 0 { - return nil - } - - switch r := br.(type) { - case *bytes.Buffer: - buf := r.Next(n) - if len(buf) == 0 { - return io.EOF - } else if len(buf) < n { - return io.ErrUnexpectedEOF - } - return nil - case *bytes.Reader: - if r.Len() == 0 { - return io.EOF - } else if r.Len() < n { - _, _ = r.Seek(0, io.SeekEnd) - return io.ErrUnexpectedEOF - } - _, err := r.Seek(int64(n), io.SeekCurrent) - return err - case *bufio.Reader: - discarded, err := r.Discard(n) - if discarded != 0 && discarded < n && err == io.EOF { - return io.ErrUnexpectedEOF - } - return err - default: - discarded, err := io.CopyN(io.Discard, br, int64(n)) - if discarded != 0 && discarded < int64(n) && err == io.EOF { - return io.ErrUnexpectedEOF - } - - return err - } -} - -func ScanForLinks(br io.Reader, cb func(cid.Cid)) (err error) { - hasReadOnce := false - defer func() { - if err == io.EOF && hasReadOnce { - err = io.ErrUnexpectedEOF - } - }() - - scratch := make([]byte, maxCidLength) - for remaining := uint64(1); remaining > 0; remaining-- { - maj, extra, err := CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - hasReadOnce = true - - switch maj { - case MajUnsignedInt, MajNegativeInt, MajOther: - case MajByteString, MajTextString: - err := discard(br, int(extra)) - if err != nil { - return err - } - case MajTag: - if extra == 42 { - maj, extra, err = CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if maj != MajByteString { - return fmt.Errorf("expected cbor type 'byte string' in input") - } - - if extra > maxCidLength { - return fmt.Errorf("string in cbor input too long") - } - - if _, err := io.ReadAtLeast(br, scratch[:extra], int(extra)); err != nil { - return err - } - - c, err := cid.Cast(scratch[1:extra]) - if err != nil { - return err - } - cb(c) - - } else { - remaining++ - } - case MajArray: - remaining += extra - case MajMap: - remaining += (extra * 2) - default: - return fmt.Errorf("unhandled 
cbor type: %d", maj) - } - } - return nil -} - -const ( - MajUnsignedInt = 0 - MajNegativeInt = 1 - MajByteString = 2 - MajTextString = 3 - MajArray = 4 - MajMap = 5 - MajTag = 6 - MajOther = 7 -) - -var maxLengthError = fmt.Errorf("length beyond maximum allowed") - -type CBORUnmarshaler interface { - UnmarshalCBOR(io.Reader) error -} - -type CBORMarshaler interface { - MarshalCBOR(io.Writer) error -} - -type Deferred struct { - Raw []byte -} - -func (d *Deferred) MarshalCBOR(w io.Writer) error { - if d == nil { - _, err := w.Write(CborNull) - return err - } - if d.Raw == nil { - return errors.New("cannot marshal Deferred with nil value for Raw (will not unmarshal)") - } - _, err := w.Write(d.Raw) - return err -} - -func (d *Deferred) UnmarshalCBOR(br io.Reader) (err error) { - // Reuse any existing buffers. - reusedBuf := d.Raw[:0] - d.Raw = nil - buf := bytes.NewBuffer(reusedBuf) - - // Allocate some scratch space. - scratch := make([]byte, maxHeaderSize) - - hasReadOnce := false - defer func() { - if err == io.EOF && hasReadOnce { - err = io.ErrUnexpectedEOF - } - }() - - // Algorithm: - // - // 1. We start off expecting to read one element. - // 2. If we see a tag, we expect to read one more element so we increment "remaining". - // 3. If see an array, we expect to read "extra" elements so we add "extra" to "remaining". - // 4. If see a map, we expect to read "2*extra" elements so we add "2*extra" to "remaining". - // 5. While "remaining" is non-zero, read more elements. - - // define this once so we don't keep allocating it. - limitedReader := io.LimitedReader{R: br} - for remaining := uint64(1); remaining > 0; remaining-- { - maj, extra, err := CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - hasReadOnce = true - if err := WriteMajorTypeHeaderBuf(scratch, buf, maj, extra); err != nil { - return err - } - - switch maj { - case MajUnsignedInt, MajNegativeInt, MajOther: - // nothing fancy to do - case MajByteString, MajTextString: - if extra > ByteArrayMaxLen { - return maxLengthError - } - // Copy the bytes - limitedReader.N = int64(extra) - buf.Grow(int(extra)) - if n, err := buf.ReadFrom(&limitedReader); err != nil { - return err - } else if n < int64(extra) { - return io.ErrUnexpectedEOF - } - case MajTag: - remaining++ - case MajArray: - if extra > MaxLength { - return maxLengthError - } - remaining += extra - case MajMap: - if extra > MaxLength { - return maxLengthError - } - remaining += extra * 2 - default: - return fmt.Errorf("unhandled deferred cbor type: %d", maj) - } - } - d.Raw = buf.Bytes() - return nil -} - -func readByte(r io.Reader) (byte, error) { - // try to cast to a concrete type, it's much faster than casting to an - // interface. 
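The numbered algorithm in Deferred.UnmarshalCBOR above copies exactly one complete CBOR item, headers included, into d.Raw, so a Deferred value round-trips untouched. A small sketch under that reading (the three bytes below encode the array [1, 2]; the cbg alias follows the package's own defaultImports):

package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	// 0x82 = array(2), followed by the unsigned integers 1 and 2.
	raw := []byte{0x82, 0x01, 0x02}

	var d cbg.Deferred
	if err := d.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
		panic(err)
	}

	// The deferred value holds the encoding verbatim and writes it back as-is.
	var out bytes.Buffer
	if err := d.MarshalCBOR(&out); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(raw, out.Bytes())) // true
}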
- switch r := r.(type) { - case *bytes.Buffer: - return r.ReadByte() - case *bytes.Reader: - return r.ReadByte() - case *bufio.Reader: - return r.ReadByte() - case *peeker: - return r.ReadByte() - case *CborReader: - return readByte(r.r) - case io.ByteReader: - return r.ReadByte() - } - var buf [1]byte - _, err := io.ReadFull(r, buf[:1]) - return buf[0], err -} - -func CborReadHeader(br io.Reader) (byte, uint64, error) { - if cr, ok := br.(*CborReader); ok { - return cr.ReadHeader() - } - - first, err := readByte(br) - if err != nil { - return 0, 0, err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - maj := (first & 0xe0) >> 5 - low := first & 0x1f - - switch { - case low < 24: - return maj, uint64(low), nil - case low == 24: - next, err := readByte(br) - if err != nil { - return 0, 0, err - } - if next < 24 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 24 with value < 24)") - } - return maj, uint64(next), nil - case low == 25: - scratch := make([]byte, 2) - if _, err := io.ReadAtLeast(br, scratch[:2], 2); err != nil { - return 0, 0, err - } - val := uint64(binary.BigEndian.Uint16(scratch[:2])) - if val <= math.MaxUint8 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 25 with value <= MaxUint8)") - } - return maj, val, nil - case low == 26: - scratch := make([]byte, 4) - if _, err := io.ReadAtLeast(br, scratch[:4], 4); err != nil { - return 0, 0, err - } - val := uint64(binary.BigEndian.Uint32(scratch[:4])) - if val <= math.MaxUint16 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 26 with value <= MaxUint16)") - } - return maj, val, nil - case low == 27: - scratch := make([]byte, 8) - if _, err := io.ReadAtLeast(br, scratch, 8); err != nil { - return 0, 0, err - } - val := binary.BigEndian.Uint64(scratch) - if val <= math.MaxUint32 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 27 with value <= MaxUint32)") - } - return maj, val, nil - default: - return 0, 0, fmt.Errorf("invalid header: (%x)", first) - } -} - -func readByteBuf(r io.Reader, scratch []byte) (byte, error) { - // Reading a single byte from these buffers is much faster than copying - // into a slice. 
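CborReadHeader above returns the major type and argument of the next item and rejects non-canonical (longer-than-necessary) length encodings. A short sketch: the canonical encoding of the unsigned integer 42 uses the one-byte-argument form, while the same form for a value under 24 is refused:

package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	// 0x18 = major type 0 (unsigned int) with additional info 24,
	// meaning the value follows in one byte: 0x2a = 42.
	maj, val, err := cbg.CborReadHeader(bytes.NewReader([]byte{0x18, 0x2a}))
	if err != nil {
		panic(err)
	}
	fmt.Println(maj == cbg.MajUnsignedInt, val) // true 42

	// Non-canonical: additional info 24 carrying a value below 24 is an error.
	_, _, err = cbg.CborReadHeader(bytes.NewReader([]byte{0x18, 0x17}))
	fmt.Println(err != nil) // true
}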
- switch r := r.(type) { - case *bytes.Buffer: - return r.ReadByte() - case *bytes.Reader: - return r.ReadByte() - case *bufio.Reader: - return r.ReadByte() - case *peeker: - return r.ReadByte() - case *CborReader: - return readByte(r.r) - case io.ByteReader: - return r.ReadByte() - } - _, err := io.ReadFull(r, scratch[:1]) - return scratch[0], err -} - -// same as the above, just tries to allocate less by using a passed in scratch buffer -func CborReadHeaderBuf(br io.Reader, scratch []byte) (byte, uint64, error) { - first, err := readByteBuf(br, scratch) - if err != nil { - return 0, 0, err - } - - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - maj := (first & 0xe0) >> 5 - low := first & 0x1f - - switch { - case low < 24: - return maj, uint64(low), nil - case low == 24: - next, err := readByteBuf(br, scratch) - if err != nil { - return 0, 0, err - } - if next < 24 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 24 with value < 24)") - } - return maj, uint64(next), nil - case low == 25: - if _, err := io.ReadAtLeast(br, scratch[:2], 2); err != nil { - return 0, 0, err - } - val := uint64(binary.BigEndian.Uint16(scratch[:2])) - if val <= math.MaxUint8 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 25 with value <= MaxUint8)") - } - return maj, val, nil - case low == 26: - if _, err := io.ReadAtLeast(br, scratch[:4], 4); err != nil { - return 0, 0, err - } - val := uint64(binary.BigEndian.Uint32(scratch[:4])) - if val <= math.MaxUint16 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 26 with value <= MaxUint16)") - } - return maj, val, nil - case low == 27: - if _, err := io.ReadAtLeast(br, scratch[:8], 8); err != nil { - return 0, 0, err - } - val := binary.BigEndian.Uint64(scratch[:8]) - if val <= math.MaxUint32 { - return 0, 0, fmt.Errorf("cbor input was not canonical (lval 27 with value <= MaxUint32)") - } - return maj, val, nil - default: - return 0, 0, fmt.Errorf("invalid header: (%x)", first) - } -} - -func CborWriteHeader(w io.Writer, t byte, l uint64) error { - return WriteMajorTypeHeader(w, t, l) -} - -// TODO: No matter what I do, this function *still* allocates. Its super frustrating. 
-// See issue: https://github.com/golang/go/issues/33160 -func WriteMajorTypeHeader(w io.Writer, t byte, l uint64) error { - if w, ok := w.(*CborWriter); ok { - return w.WriteMajorTypeHeader(t, l) - } - - switch { - case l < 24: - _, err := w.Write([]byte{(t << 5) | byte(l)}) - return err - case l < (1 << 8): - _, err := w.Write([]byte{(t << 5) | 24, byte(l)}) - return err - case l < (1 << 16): - var b [3]byte - b[0] = (t << 5) | 25 - binary.BigEndian.PutUint16(b[1:3], uint16(l)) - _, err := w.Write(b[:]) - return err - case l < (1 << 32): - var b [5]byte - b[0] = (t << 5) | 26 - binary.BigEndian.PutUint32(b[1:5], uint32(l)) - _, err := w.Write(b[:]) - return err - default: - var b [9]byte - b[0] = (t << 5) | 27 - binary.BigEndian.PutUint64(b[1:], uint64(l)) - _, err := w.Write(b[:]) - return err - } -} - -// Same as the above, but uses a passed in buffer to avoid allocations -func WriteMajorTypeHeaderBuf(buf []byte, w io.Writer, t byte, l uint64) error { - switch { - case l < 24: - buf[0] = (t << 5) | byte(l) - _, err := w.Write(buf[:1]) - return err - case l < (1 << 8): - buf[0] = (t << 5) | 24 - buf[1] = byte(l) - _, err := w.Write(buf[:2]) - return err - case l < (1 << 16): - buf[0] = (t << 5) | 25 - binary.BigEndian.PutUint16(buf[1:3], uint16(l)) - _, err := w.Write(buf[:3]) - return err - case l < (1 << 32): - buf[0] = (t << 5) | 26 - binary.BigEndian.PutUint32(buf[1:5], uint32(l)) - _, err := w.Write(buf[:5]) - return err - default: - buf[0] = (t << 5) | 27 - binary.BigEndian.PutUint64(buf[1:9], uint64(l)) - _, err := w.Write(buf[:9]) - return err - } -} - -func CborEncodeMajorType(t byte, l uint64) []byte { - switch { - case l < 24: - var b [1]byte - b[0] = (t << 5) | byte(l) - return b[:1] - case l < (1 << 8): - var b [2]byte - b[0] = (t << 5) | 24 - b[1] = byte(l) - return b[:2] - case l < (1 << 16): - var b [3]byte - b[0] = (t << 5) | 25 - binary.BigEndian.PutUint16(b[1:3], uint16(l)) - return b[:3] - case l < (1 << 32): - var b [5]byte - b[0] = (t << 5) | 26 - binary.BigEndian.PutUint32(b[1:5], uint32(l)) - return b[:5] - default: - var b [9]byte - b[0] = (t << 5) | 27 - binary.BigEndian.PutUint64(b[1:], uint64(l)) - return b[:] - } -} - -func ReadTaggedByteArray(br io.Reader, exptag uint64, maxlen uint64) (bs []byte, err error) { - maj, extra, err := CborReadHeader(br) - if err != nil { - return nil, err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != MajTag { - return nil, fmt.Errorf("expected cbor type 'tag' in input") - } - - if extra != exptag { - return nil, fmt.Errorf("expected tag %d", exptag) - } - - return ReadByteArray(br, maxlen) -} - -func ReadByteArray(br io.Reader, maxlen uint64) ([]byte, error) { - maj, extra, err := CborReadHeader(br) - if err != nil { - return nil, err - } - - if maj != MajByteString { - return nil, fmt.Errorf("expected cbor type 'byte string' in input") - } - - if extra > maxlen { - return nil, fmt.Errorf("string in cbor input too long, maxlen: %d", maxlen) - } - - buf := make([]byte, extra) - if _, err := io.ReadAtLeast(br, buf, int(extra)); err != nil { - return nil, err - } - - return buf, nil -} - -var ( - CborBoolFalse = []byte{0xf4} - CborBoolTrue = []byte{0xf5} - CborNull = []byte{0xf6} -) - -func EncodeBool(b bool) []byte { - if b { - return CborBoolTrue - } - return CborBoolFalse -} - -func WriteBool(w io.Writer, b bool) error { - _, err := w.Write(EncodeBool(b)) - return err -} - -var stringBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, MaxLength) - return &b - }, 
-} - -func ReadString(r io.Reader) (string, error) { - maj, l, err := CborReadHeader(r) - if err != nil { - return "", err - } - - if maj != MajTextString { - return "", fmt.Errorf("got tag %d while reading string value (l = %d)", maj, l) - } - - if l > MaxLength { - return "", fmt.Errorf("string in input was too long") - } - - bufp := stringBufPool.Get().(*[]byte) - buf := (*bufp)[:l] // shares same backing array as pooled slice - defer func() { - // optimizes to memclr - for i := range buf { - buf[i] = 0 - } - stringBufPool.Put(bufp) - }() - _, err = io.ReadAtLeast(r, buf, int(l)) - if err != nil { - return "", err - } - - return string(buf), nil -} - -// Deprecated: use ReadString -func ReadStringBuf(r io.Reader, _ []byte) (string, error) { - return ReadString(r) -} - -func ReadCid(br io.Reader) (cid.Cid, error) { - buf, err := ReadTaggedByteArray(br, 42, 512) - if err != nil { - return cid.Undef, err - } - - return bufToCid(buf) -} - -func bufToCid(buf []byte) (cid.Cid, error) { - if len(buf) == 0 { - return cid.Undef, fmt.Errorf("undefined cid") - } - - if len(buf) < 2 { - return cid.Undef, fmt.Errorf("cbor serialized CIDs must have at least two bytes") - } - - if buf[0] != 0 { - return cid.Undef, fmt.Errorf("cbor serialized CIDs must have binary multibase") - } - - return cid.Cast(buf[1:]) -} - -var byteArrZero = []byte{0} - -func WriteCid(w io.Writer, c cid.Cid) error { - cw := NewCborWriter(w) - if err := cw.WriteMajorTypeHeader(MajTag, 42); err != nil { - return err - } - if c == cid.Undef { - return fmt.Errorf("undefined cid") - // return CborWriteHeader(w, MajByteString, 0) - } - - if err := cw.WriteMajorTypeHeader(MajByteString, uint64(c.ByteLen()+1)); err != nil { - return err - } - - // that binary multibase prefix... - if _, err := cw.Write(byteArrZero); err != nil { - return err - } - - if _, err := c.WriteBytes(cw); err != nil { - return err - } - - return nil -} - -func WriteCidBuf(buf []byte, w io.Writer, c cid.Cid) error { - if err := WriteMajorTypeHeaderBuf(buf, w, MajTag, 42); err != nil { - return err - } - if c == cid.Undef { - return fmt.Errorf("undefined cid") - // return CborWriteHeader(w, MajByteString, 0) - } - - if err := WriteMajorTypeHeaderBuf(buf, w, MajByteString, uint64(c.ByteLen()+1)); err != nil { - return err - } - - // that binary multibase prefix... 
- if _, err := w.Write(byteArrZero); err != nil { - return err - } - - if _, err := c.WriteBytes(w); err != nil { - return err - } - - return nil -} - -type CborBool bool - -func (cb CborBool) MarshalCBOR(w io.Writer) error { - return WriteBool(w, bool(cb)) -} - -func (cb *CborBool) UnmarshalCBOR(r io.Reader) error { - t, val, err := CborReadHeader(r) - if err != nil { - return err - } - - if t != MajOther { - return fmt.Errorf("booleans should be major type 7") - } - - switch val { - case 20: - *cb = false - case 21: - *cb = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", val) - } - return nil -} - -type CborInt int64 - -func (ci CborInt) MarshalCBOR(w io.Writer) error { - v := int64(ci) - if v >= 0 { - if err := WriteMajorTypeHeader(w, MajUnsignedInt, uint64(v)); err != nil { - return err - } - } else { - if err := WriteMajorTypeHeader(w, MajNegativeInt, uint64(-v)-1); err != nil { - return err - } - } - return nil -} - -func (ci *CborInt) UnmarshalCBOR(r io.Reader) error { - maj, extra, err := CborReadHeader(r) - if err != nil { - return err - } - var extraI int64 - switch maj { - case MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - *ci = CborInt(extraI) - return nil -} diff --git a/vendor/github.com/whyrusleeping/cbor-gen/validate.go b/vendor/github.com/whyrusleeping/cbor-gen/validate.go deleted file mode 100644 index 3c8495c..0000000 --- a/vendor/github.com/whyrusleeping/cbor-gen/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -package typegen - -import ( - "bytes" - "fmt" - "io" -) - -// ValidateCBOR validates that a byte array is a single valid CBOR object. -func ValidateCBOR(b []byte) error { - // The code here is basically identical to the previous function, it - // just doesn't copy. - - br := bytes.NewReader(b) - - for remaining := uint64(1); remaining > 0; remaining-- { - maj, extra, err := CborReadHeader(br) - if err != nil { - return err - } - - switch maj { - case MajUnsignedInt, MajNegativeInt, MajOther: - // nothing fancy to do - case MajByteString, MajTextString: - if extra > ByteArrayMaxLen { - return maxLengthError - } - if uint64(br.Len()) < extra { - return io.ErrUnexpectedEOF - } - - if _, err := br.Seek(int64(extra), io.SeekCurrent); err != nil { - return err - } - case MajTag: - remaining++ - case MajArray: - if extra > MaxLength { - return maxLengthError - } - remaining += extra - case MajMap: - if extra > MaxLength { - return maxLengthError - } - remaining += extra * 2 - default: - return fmt.Errorf("unhandled deferred cbor type: %d", maj) - } - } - if br.Len() > 0 { - return fmt.Errorf("unexpected %d unread bytes", br.Len()) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddb..0000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. 
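Putting the helpers above together: write a CBOR text-string header and payload with WriteMajorTypeHeader, confirm the buffer holds exactly one well-formed item with ValidateCBOR, then read the value back with ReadString. A minimal sketch using only functions shown in this diff:

package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	var buf bytes.Buffer

	msg := "hello cbor"
	// Major type 3 (text string) with the byte length as the argument,
	// followed by the raw UTF-8 payload.
	if err := cbg.WriteMajorTypeHeader(&buf, cbg.MajTextString, uint64(len(msg))); err != nil {
		panic(err)
	}
	if _, err := buf.WriteString(msg); err != nil {
		panic(err)
	}

	// ValidateCBOR walks the buffer and requires exactly one CBOR object.
	if err := cbg.ValidateCBOR(buf.Bytes()); err != nil {
		panic(err)
	}

	// ReadString checks the major type and the MaxLength limit, then
	// returns the decoded text.
	s, err := cbg.ReadString(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // hello cbor
}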
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e9..0000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s.go b/vendor/golang.org/x/crypto/blake2s/blake2s.go deleted file mode 100644 index e3f46aa..0000000 --- a/vendor/golang.org/x/crypto/blake2s/blake2s.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blake2s implements the BLAKE2s hash algorithm defined by RFC 7693 -// and the extendable output function (XOF) BLAKE2Xs. -// -// BLAKE2s is optimized for 8- to 32-bit platforms and produces digests of any -// size between 1 and 32 bytes. -// For a detailed specification of BLAKE2s see https://blake2.net/blake2.pdf -// and for BLAKE2Xs see https://blake2.net/blake2x.pdf -// -// If you aren't sure which function you need, use BLAKE2s (Sum256 or New256). -// If you need a secret-key MAC (message authentication code), use the New256 -// function with a non-nil key. -// -// BLAKE2X is a construction to compute hash values larger than 32 bytes. It -// can produce hash values between 0 and 65535 bytes. -package blake2s // import "golang.org/x/crypto/blake2s" - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - // The blocksize of BLAKE2s in bytes. - BlockSize = 64 - - // The hash size of BLAKE2s-256 in bytes. - Size = 32 - - // The hash size of BLAKE2s-128 in bytes. - Size128 = 16 -) - -var errKeySize = errors.New("blake2s: invalid key size") - -var iv = [8]uint32{ - 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, - 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, -} - -// Sum256 returns the BLAKE2s-256 checksum of the data. -func Sum256(data []byte) [Size]byte { - var sum [Size]byte - checkSum(&sum, Size, data) - return sum -} - -// New256 returns a new hash.Hash computing the BLAKE2s-256 checksum. A non-nil -// key turns the hash into a MAC. The key must between zero and 32 bytes long. -// When the key is nil, the returned hash.Hash implements BinaryMarshaler -// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. -func New256(key []byte) (hash.Hash, error) { return newDigest(Size, key) } - -// New128 returns a new hash.Hash computing the BLAKE2s-128 checksum given a -// non-empty key. Note that a 128-bit digest is too small to be secure as a -// cryptographic hash and should only be used as a MAC, thus the key argument -// is not optional. 
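Per the blake2s package comment above, Sum256/New256 cover most uses and a non-nil key turns the hash into a MAC. A brief sketch (the key below is illustrative only):

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2s"
)

func main() {
	// Plain BLAKE2s-256 of a message.
	sum := blake2s.Sum256([]byte("hello world"))
	fmt.Printf("digest: %x\n", sum)

	// Keyed use: a non-nil key (up to 32 bytes) makes this a MAC.
	key := []byte("0123456789abcdef0123456789abcdef") // 32 bytes, illustrative only
	mac, err := blake2s.New256(key)
	if err != nil {
		panic(err)
	}
	mac.Write([]byte("hello world"))
	fmt.Printf("mac:    %x\n", mac.Sum(nil))
}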
-func New128(key []byte) (hash.Hash, error) { - if len(key) == 0 { - return nil, errors.New("blake2s: a key is required for a 128-bit hash") - } - return newDigest(Size128, key) -} - -func newDigest(hashSize int, key []byte) (*digest, error) { - if len(key) > Size { - return nil, errKeySize - } - d := &digest{ - size: hashSize, - keyLen: len(key), - } - copy(d.key[:], key) - d.Reset() - return d, nil -} - -func checkSum(sum *[Size]byte, hashSize int, data []byte) { - var ( - h [8]uint32 - c [2]uint32 - ) - - h = iv - h[0] ^= uint32(hashSize) | (1 << 16) | (1 << 24) - - if length := len(data); length > BlockSize { - n := length &^ (BlockSize - 1) - if length == n { - n -= BlockSize - } - hashBlocks(&h, &c, 0, data[:n]) - data = data[n:] - } - - var block [BlockSize]byte - offset := copy(block[:], data) - remaining := uint32(BlockSize - offset) - - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - hashBlocks(&h, &c, 0xFFFFFFFF, block[:]) - - for i, v := range h { - binary.LittleEndian.PutUint32(sum[4*i:], v) - } -} - -type digest struct { - h [8]uint32 - c [2]uint32 - size int - block [BlockSize]byte - offset int - - key [BlockSize]byte - keyLen int -} - -const ( - magic = "b2s" - marshaledSize = len(magic) + 8*4 + 2*4 + 1 + BlockSize + 1 -) - -func (d *digest) MarshalBinary() ([]byte, error) { - if d.keyLen != 0 { - return nil, errors.New("crypto/blake2s: cannot marshal MACs") - } - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - for i := 0; i < 8; i++ { - b = appendUint32(b, d.h[i]) - } - b = appendUint32(b, d.c[0]) - b = appendUint32(b, d.c[1]) - // Maximum value for size is 32 - b = append(b, byte(d.size)) - b = append(b, d.block[:]...) - b = append(b, byte(d.offset)) - return b, nil -} - -func (d *digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("crypto/blake2s: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("crypto/blake2s: invalid hash state size") - } - b = b[len(magic):] - for i := 0; i < 8; i++ { - b, d.h[i] = consumeUint32(b) - } - b, d.c[0] = consumeUint32(b) - b, d.c[1] = consumeUint32(b) - d.size = int(b[0]) - b = b[1:] - copy(d.block[:], b[:BlockSize]) - b = b[BlockSize:] - d.offset = int(b[0]) - return nil -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Size() int { return d.size } - -func (d *digest) Reset() { - d.h = iv - d.h[0] ^= uint32(d.size) | (uint32(d.keyLen) << 8) | (1 << 16) | (1 << 24) - d.offset, d.c[0], d.c[1] = 0, 0, 0 - if d.keyLen > 0 { - d.block = d.key - d.offset = BlockSize - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - - if d.offset > 0 { - remaining := BlockSize - d.offset - if n <= remaining { - d.offset += copy(d.block[d.offset:], p) - return - } - copy(d.block[d.offset:], p[:remaining]) - hashBlocks(&d.h, &d.c, 0, d.block[:]) - d.offset = 0 - p = p[remaining:] - } - - if length := len(p); length > BlockSize { - nn := length &^ (BlockSize - 1) - if length == nn { - nn -= BlockSize - } - hashBlocks(&d.h, &d.c, 0, p[:nn]) - p = p[nn:] - } - - d.offset += copy(d.block[:], p) - return -} - -func (d *digest) Sum(sum []byte) []byte { - var hash [Size]byte - d.finalize(&hash) - return append(sum, hash[:d.size]...) 
-} - -func (d *digest) finalize(hash *[Size]byte) { - var block [BlockSize]byte - h := d.h - c := d.c - - copy(block[:], d.block[:d.offset]) - remaining := uint32(BlockSize - d.offset) - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - hashBlocks(&h, &c, 0xFFFFFFFF, block[:]) - for i, v := range h { - binary.LittleEndian.PutUint32(hash[4*i:], v) - } -} - -func appendUint32(b []byte, x uint32) []byte { - var a [4]byte - binary.BigEndian.PutUint32(a[:], x) - return append(b, a[:]...) -} - -func consumeUint32(b []byte) ([]byte, uint32) { - x := binary.BigEndian.Uint32(b) - return b[4:], x -} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_generic.go b/vendor/golang.org/x/crypto/blake2s/blake2s_generic.go deleted file mode 100644 index d0735ad..0000000 --- a/vendor/golang.org/x/crypto/blake2s/blake2s_generic.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2s - -import ( - "math/bits" -) - -// the precomputed values for BLAKE2s -// there are 10 16-byte arrays - one for each round -// the entries are calculated from the sigma constants. -var precomputed = [10][16]byte{ - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, - {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, - {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, - {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, - {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, - {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, - {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, - {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, - {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, -} - -func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) { - var m [16]uint32 - c0, c1 := c[0], c[1] - - for i := 0; i < len(blocks); { - c0 += BlockSize - if c0 < BlockSize { - c1++ - } - - v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] - v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] - v12 ^= c0 - v13 ^= c1 - v14 ^= flag - - for j := range m { - m[j] = uint32(blocks[i]) | uint32(blocks[i+1])<<8 | uint32(blocks[i+2])<<16 | uint32(blocks[i+3])<<24 - i += 4 - } - - for k := range precomputed { - s := &(precomputed[k]) - - v0 += m[s[0]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft32(v12, -16) - v8 += v12 - v4 ^= v8 - v4 = bits.RotateLeft32(v4, -12) - v1 += m[s[1]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft32(v13, -16) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft32(v5, -12) - v2 += m[s[2]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft32(v14, -16) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft32(v6, -12) - v3 += m[s[3]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft32(v15, -16) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft32(v7, -12) - - v0 += m[s[4]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft32(v12, -8) - v8 += v12 - v4 ^= v8 - v4 = bits.RotateLeft32(v4, -7) - v1 += m[s[5]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft32(v13, -8) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft32(v5, -7) - v2 += m[s[6]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft32(v14, -8) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft32(v6, -7) - v3 += m[s[7]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft32(v15, -8) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft32(v7, -7) - - v0 += m[s[8]] - v0 += 
v5 - v15 ^= v0 - v15 = bits.RotateLeft32(v15, -16) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft32(v5, -12) - v1 += m[s[9]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft32(v12, -16) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft32(v6, -12) - v2 += m[s[10]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft32(v13, -16) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft32(v7, -12) - v3 += m[s[11]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft32(v14, -16) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft32(v4, -12) - - v0 += m[s[12]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft32(v15, -8) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft32(v5, -7) - v1 += m[s[13]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft32(v12, -8) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft32(v6, -7) - v2 += m[s[14]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft32(v13, -8) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft32(v7, -7) - v3 += m[s[15]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft32(v14, -8) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft32(v4, -7) - } - - h[0] ^= v0 ^ v8 - h[1] ^= v1 ^ v9 - h[2] ^= v2 ^ v10 - h[3] ^= v3 ^ v11 - h[4] ^= v4 ^ v12 - h[5] ^= v5 ^ v13 - h[6] ^= v6 ^ v14 - h[7] ^= v7 ^ v15 - } - c[0], c[1] = c0, c1 -} diff --git a/vendor/golang.org/x/crypto/blake2s/blake2x.go b/vendor/golang.org/x/crypto/blake2s/blake2x.go deleted file mode 100644 index 828749f..0000000 --- a/vendor/golang.org/x/crypto/blake2s/blake2x.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2s - -import ( - "encoding/binary" - "errors" - "io" -) - -// XOF defines the interface to hash functions that -// support arbitrary-length output. -type XOF interface { - // Write absorbs more data into the hash's state. It panics if called - // after Read. - io.Writer - - // Read reads more output from the hash. It returns io.EOF if the limit - // has been reached. - io.Reader - - // Clone returns a copy of the XOF in its current state. - Clone() XOF - - // Reset resets the XOF to its initial state. - Reset() -} - -// OutputLengthUnknown can be used as the size argument to NewXOF to indicate -// the length of the output is not known in advance. -const OutputLengthUnknown = 0 - -// magicUnknownOutputLength is a magic value for the output size that indicates -// an unknown number of output bytes. -const magicUnknownOutputLength = 65535 - -// maxOutputLength is the absolute maximum number of bytes to produce when the -// number of output bytes is unknown. -const maxOutputLength = (1 << 32) * 32 - -// NewXOF creates a new variable-output-length hash. The hash either produce a -// known number of bytes (1 <= size < 65535), or an unknown number of bytes -// (size == OutputLengthUnknown). In the latter case, an absolute limit of -// 128GiB applies. -// -// A non-nil key turns the hash into a MAC. The key must between -// zero and 32 bytes long. -func NewXOF(size uint16, key []byte) (XOF, error) { - if len(key) > Size { - return nil, errKeySize - } - if size == magicUnknownOutputLength { - // 2^16-1 indicates an unknown number of bytes and thus isn't a - // valid length. 
- return nil, errors.New("blake2s: XOF length too large") - } - if size == OutputLengthUnknown { - size = magicUnknownOutputLength - } - x := &xof{ - d: digest{ - size: Size, - keyLen: len(key), - }, - length: size, - } - copy(x.d.key[:], key) - x.Reset() - return x, nil -} - -type xof struct { - d digest - length uint16 - remaining uint64 - cfg, root, block [Size]byte - offset int - nodeOffset uint32 - readMode bool -} - -func (x *xof) Write(p []byte) (n int, err error) { - if x.readMode { - panic("blake2s: write to XOF after read") - } - return x.d.Write(p) -} - -func (x *xof) Clone() XOF { - clone := *x - return &clone -} - -func (x *xof) Reset() { - x.cfg[0] = byte(Size) - binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length - binary.LittleEndian.PutUint16(x.cfg[12:], x.length) // XOF length - x.cfg[15] = byte(Size) // inner hash size - - x.d.Reset() - x.d.h[3] ^= uint32(x.length) - - x.remaining = uint64(x.length) - if x.remaining == magicUnknownOutputLength { - x.remaining = maxOutputLength - } - x.offset, x.nodeOffset = 0, 0 - x.readMode = false -} - -func (x *xof) Read(p []byte) (n int, err error) { - if !x.readMode { - x.d.finalize(&x.root) - x.readMode = true - } - - if x.remaining == 0 { - return 0, io.EOF - } - - n = len(p) - if uint64(n) > x.remaining { - n = int(x.remaining) - p = p[:n] - } - - if x.offset > 0 { - blockRemaining := Size - x.offset - if n < blockRemaining { - x.offset += copy(p, x.block[x.offset:]) - x.remaining -= uint64(n) - return - } - copy(p, x.block[x.offset:]) - p = p[blockRemaining:] - x.offset = 0 - x.remaining -= uint64(blockRemaining) - } - - for len(p) >= Size { - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - copy(p, x.block[:]) - p = p[Size:] - x.remaining -= uint64(Size) - } - - if todo := len(p); todo > 0 { - if x.remaining < uint64(Size) { - x.cfg[0] = byte(x.remaining) - } - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - x.offset = copy(p, x.block[:todo]) - x.remaining -= uint64(todo) - } - - return -} - -func (d *digest) initConfig(cfg *[Size]byte) { - d.offset, d.c[0], d.c[1] = 0, 0, 0 - for i := range d.h { - d.h[i] = iv[i] ^ binary.LittleEndian.Uint32(cfg[i*4:]) - } -} diff --git a/vendor/golang.org/x/crypto/blake2s/register.go b/vendor/golang.org/x/crypto/blake2s/register.go deleted file mode 100644 index ef79ff3..0000000 --- a/vendor/golang.org/x/crypto/blake2s/register.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package blake2s - -import ( - "crypto" - "hash" -) - -func init() { - newHash256 := func() hash.Hash { - h, _ := New256(nil) - return h - } - - crypto.RegisterHash(crypto.BLAKE2s_256, newHash256) -} diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go deleted file mode 100644 index decd8cf..0000000 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
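The BLAKE2Xs XOF above produces output of a chosen length, or an open-ended stream when constructed with OutputLengthUnknown. A short sketch reading 64 bytes from an unkeyed XOF:

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/blake2s"
)

func main() {
	// Ask for exactly 64 bytes of output; a nil key means unkeyed hashing.
	x, err := blake2s.NewXOF(64, nil)
	if err != nil {
		panic(err)
	}
	x.Write([]byte("some input"))

	// Reads must come after all writes; the first Read finalizes the state.
	out := make([]byte, 64)
	if _, err := io.ReadFull(x, out); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)
}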
- -// Package sha3 implements the SHA-3 fixed-output-length hash functions and -// the SHAKE variable-output-length hash functions defined by FIPS-202. -// -// Both types of hash function use the "sponge" construction and the Keccak -// permutation. For a detailed specification see http://keccak.noekeon.org/ -// -// # Guidance -// -// If you aren't sure what function you need, use SHAKE256 with at least 64 -// bytes of output. The SHAKE instances are faster than the SHA3 instances; -// the latter have to allocate memory to conform to the hash.Hash interface. -// -// If you need a secret-key MAC (message authentication code), prepend the -// secret key to the input, hash with SHAKE256 and read at least 32 bytes of -// output. -// -// # Security strengths -// -// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security -// strength against preimage attacks of x bits. Since they only produce "x" -// bits of output, their collision-resistance is only "x/2" bits. -// -// The SHAKE-256 and -128 functions have a generic security strength of 256 and -// 128 bits against all attacks, provided that at least 2x bits of their output -// is used. Requesting more than 64 or 32 bytes of output, respectively, does -// not increase the collision-resistance of the SHAKE functions. -// -// # The sponge construction -// -// A sponge builds a pseudo-random function from a public pseudo-random -// permutation, by applying the permutation to a state of "rate + capacity" -// bytes, but hiding "capacity" of the bytes. -// -// A sponge starts out with a zero state. To hash an input using a sponge, up -// to "rate" bytes of the input are XORed into the sponge's state. The sponge -// is then "full" and the permutation is applied to "empty" it. This process is -// repeated until all the input has been "absorbed". The input is then padded. -// The digest is "squeezed" from the sponge in the same way, except that output -// is copied out instead of input being XORed in. -// -// A sponge is parameterized by its generic security strength, which is equal -// to half its capacity; capacity + rate is equal to the permutation's width. -// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means -// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. -// -// # Recommendations -// -// The SHAKE functions are recommended for most new uses. They can produce -// output of arbitrary length. SHAKE256, with an output length of at least -// 64 bytes, provides 256-bit security against all attacks. The Keccak team -// recommends it for most applications upgrading from SHA2-512. (NIST chose a -// much stronger, but much slower, sponge instance for SHA3-512.) -// -// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. -// They produce output of the same length, with the same security strengths -// against all attacks. This means, in particular, that SHA3-256 only has -// 128-bit collision resistance, because its output length is 32 bytes. -package sha3 // import "golang.org/x/crypto/sha3" diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go deleted file mode 100644 index 1e815c9..0000000 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package sha3 - -// This file provides functions for creating instances of the SHA-3 -// and SHAKE hash functions, as well as utility functions for hashing -// bytes. - -import ( - "hash" -) - -// New224 creates a new SHA3-224 hash. -// Its generic security strength is 224 bits against preimage attacks, -// and 112 bits against collision attacks. -func New224() hash.Hash { - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} -} - -// New256 creates a new SHA3-256 hash. -// Its generic security strength is 256 bits against preimage attacks, -// and 128 bits against collision attacks. -func New256() hash.Hash { - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} -} - -// New384 creates a new SHA3-384 hash. -// Its generic security strength is 384 bits against preimage attacks, -// and 192 bits against collision attacks. -func New384() hash.Hash { - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} -} - -// New512 creates a new SHA3-512 hash. -// Its generic security strength is 512 bits against preimage attacks, -// and 256 bits against collision attacks. -func New512() hash.Hash { - return &state{rate: 72, outputLen: 64, dsbyte: 0x06} -} - -// NewLegacyKeccak256 creates a new Keccak-256 hash. -// -// Only use this function if you require compatibility with an existing cryptosystem -// that uses non-standard padding. All other users should use New256 instead. -func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } - -// NewLegacyKeccak512 creates a new Keccak-512 hash. -// -// Only use this function if you require compatibility with an existing cryptosystem -// that uses non-standard padding. All other users should use New512 instead. -func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } - -// Sum224 returns the SHA3-224 digest of the data. -func Sum224(data []byte) (digest [28]byte) { - h := New224() - h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum256 returns the SHA3-256 digest of the data. -func Sum256(data []byte) (digest [32]byte) { - h := New256() - h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum384 returns the SHA3-384 digest of the data. -func Sum384(data []byte) (digest [48]byte) { - h := New384() - h.Write(data) - h.Sum(digest[:0]) - return -} - -// Sum512 returns the SHA3-512 digest of the data. -func Sum512(data []byte) (digest [64]byte) { - h := New512() - h.Write(data) - h.Sum(digest[:0]) - return -} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go deleted file mode 100644 index 13e7058..0000000 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ /dev/null @@ -1,410 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// rc stores the round constants for use in the ι step. 
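Following the guidance in the sha3 package documentation above: SHA3-256 as a fixed-length drop-in digest, SHAKE256 when arbitrary-length output is wanted. A brief sketch; ShakeSum256 is provided by the same package even though it does not appear in this diff:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	data := []byte("hello world")

	// Fixed-length SHA3-256 digest (32 bytes).
	d := sha3.Sum256(data)
	fmt.Printf("sha3-256: %x\n", d)

	// SHAKE256 with 64 bytes of output, per the recommendation above.
	out := make([]byte, 64)
	sha3.ShakeSum256(out, data)
	fmt.Printf("shake256: %x\n", out)
}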
-var rc = [24]uint64{ - 0x0000000000000001, - 0x0000000000008082, - 0x800000000000808A, - 0x8000000080008000, - 0x000000000000808B, - 0x0000000080000001, - 0x8000000080008081, - 0x8000000000008009, - 0x000000000000008A, - 0x0000000000000088, - 0x0000000080008009, - 0x000000008000000A, - 0x000000008000808B, - 0x800000000000008B, - 0x8000000000008089, - 0x8000000000008003, - 0x8000000000008002, - 0x8000000000000080, - 0x000000000000800A, - 0x800000008000000A, - 0x8000000080008081, - 0x8000000000008080, - 0x0000000080000001, - 0x8000000080008008, -} - -// keccakF1600 applies the Keccak permutation to a 1600b-wide -// state represented as a slice of 25 uint64s. -func keccakF1600(a *[25]uint64) { - // Implementation translated from Keccak-inplace.c - // in the keccak reference code. - var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 - - for i := 0; i < 24; i += 4 { - // Combines the 5 steps in each round into 2 steps. - // Unrolls 4 rounds per loop and spreads some steps across rounds. - - // Round 1 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[6] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[12] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[18] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[24] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] - a[6] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[16] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[22] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[3] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[10] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[1] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[7] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[19] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[20] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[11] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[23] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[4] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[5] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[2] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[8] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[14] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[15] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - // Round 2 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - 
d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[16] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[7] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[23] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[14] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] - a[16] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[11] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[2] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[18] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[20] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[6] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[22] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[4] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[15] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[1] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[8] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[24] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[10] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[12] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[3] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[19] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[5] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - // Round 3 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[11] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[22] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[8] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[19] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] - a[11] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[1] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[12] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[23] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[15] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[16] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[2] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[24] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[5] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[6] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[3] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[14] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[20] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ 
bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[7] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[18] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[4] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[10] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - // Round 4 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[1] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[2] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[3] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[4] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] - a[1] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[6] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[7] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[8] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[5] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[11] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[12] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[14] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[10] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[16] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[18] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[19] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[15] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[22] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[23] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[24] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[20] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - } -} diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go deleted file mode 100644 index 8b4453a..0000000 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
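Every rotation in the unrolled keccakF1600 above is written as t<<r | t>>(64-r). As a standalone sanity-check sketch (not part of the vendored code), that pattern is exactly a 64-bit left rotation, i.e. math/bits.RotateLeft64:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	t := uint64(0x0123456789abcdef)

	// The form used throughout keccakF1600 for a rotation by 44 bits...
	manual := t<<44 | t>>(64-44)

	// ...is the same operation as the standard-library rotate.
	fmt.Println(manual == bits.RotateLeft64(t, 44)) // expected: true
}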
- -//go:build go1.4 -// +build go1.4 - -package sha3 - -import ( - "crypto" -) - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go deleted file mode 100644 index fa182be..0000000 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// spongeDirection indicates the direction bytes are flowing through the sponge. -type spongeDirection int - -const ( - // spongeAbsorbing indicates that the sponge is absorbing input. - spongeAbsorbing spongeDirection = iota - // spongeSqueezing indicates that the sponge is being squeezed. - spongeSqueezing -) - -const ( - // maxRate is the maximum size of the internal buffer. SHAKE-256 - // currently needs the largest buffer. - maxRate = 168 -) - -type state struct { - // Generic sponge components. - a [25]uint64 // main state of the hash - buf []byte // points into storage - rate int // the number of bytes of state to use - - // dsbyte contains the "domain separation" bits and the first bit of - // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the - // SHA-3 and SHAKE functions by appending bitstrings to the message. - // Using a little-endian bit-ordering convention, these are "01" for SHA-3 - // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the - // padding rule from section 5.1 is applied to pad the message to a multiple - // of the rate, which involves adding a "1" bit, zero or more "0" bits, and - // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, - // giving 00000110b (0x06) and 00011111b (0x1f). - // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf - // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and - // Extendable-Output Functions (May 2014)" - dsbyte byte - - storage storageBuf - - // Specific to SHA-3 and SHAKE. - outputLen int // the default output size in bytes - state spongeDirection // whether the sponge is absorbing or squeezing -} - -// BlockSize returns the rate of sponge underlying this hash function. -func (d *state) BlockSize() int { return d.rate } - -// Size returns the output size of the hash function in bytes. -func (d *state) Size() int { return d.outputLen } - -// Reset clears the internal state by zeroing the sponge state and -// the byte buffer, and setting Sponge.state to absorbing. -func (d *state) Reset() { - // Zero the permutation's state. - for i := range d.a { - d.a[i] = 0 - } - d.state = spongeAbsorbing - d.buf = d.storage.asBytes()[:0] -} - -func (d *state) clone() *state { - ret := *d - if ret.state == spongeAbsorbing { - ret.buf = ret.storage.asBytes()[:len(ret.buf)] - } else { - ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] - } - - return &ret -} - -// permute applies the KeccakF-1600 permutation. It handles -// any input-output buffering. -func (d *state) permute() { - switch d.state { - case spongeAbsorbing: - // If we're absorbing, we need to xor the input into the state - // before applying the permutation. 
- xorIn(d, d.buf) - d.buf = d.storage.asBytes()[:0] - keccakF1600(&d.a) - case spongeSqueezing: - // If we're squeezing, we need to apply the permutation before - // copying more output. - keccakF1600(&d.a) - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) - } -} - -// pads appends the domain separation bits in dsbyte, applies -// the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *state) padAndPermute(dsbyte byte) { - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } - // Pad with this instance's domain-separator bits. We know that there's - // at least one byte of space in d.buf because, if it were full, - // permute would have been called to empty it. dsbyte also contains the - // first one bit for the padding. See the comment in the state struct. - d.buf = append(d.buf, dsbyte) - zerosStart := len(d.buf) - d.buf = d.storage.asBytes()[:d.rate] - for i := zerosStart; i < d.rate; i++ { - d.buf[i] = 0 - } - // This adds the final one bit for the padding. Because of the way that - // bits are numbered from the LSB upwards, the final bit is the MSB of - // the last byte. - d.buf[d.rate-1] ^= 0x80 - // Apply the permutation - d.permute() - d.state = spongeSqueezing - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) -} - -// Write absorbs more data into the hash's state. It produces an error -// if more data is written to the ShakeHash after writing -func (d *state) Write(p []byte) (written int, err error) { - if d.state != spongeAbsorbing { - panic("sha3: write to sponge after read") - } - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } - written = len(p) - - for len(p) > 0 { - if len(d.buf) == 0 && len(p) >= d.rate { - // The fast path; absorb a full "rate" bytes of input and apply the permutation. - xorIn(d, p[:d.rate]) - p = p[d.rate:] - keccakF1600(&d.a) - } else { - // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - len(d.buf) - if todo > len(p) { - todo = len(p) - } - d.buf = append(d.buf, p[:todo]...) - p = p[todo:] - - // If the sponge is full, apply the permutation. - if len(d.buf) == d.rate { - d.permute() - } - } - } - - return -} - -// Read squeezes an arbitrary number of bytes from the sponge. -func (d *state) Read(out []byte) (n int, err error) { - // If we're still absorbing, pad and apply the permutation. - if d.state == spongeAbsorbing { - d.padAndPermute(d.dsbyte) - } - - n = len(out) - - // Now, do the squeezing. - for len(out) > 0 { - n := copy(out, d.buf) - d.buf = d.buf[n:] - out = out[n:] - - // Apply the permutation if we've squeezed the sponge dry. - if len(d.buf) == 0 { - d.permute() - } - } - - return -} - -// Sum applies padding to the hash state and then squeezes out the desired -// number of output bytes. -func (d *state) Sum(in []byte) []byte { - // Make a copy of the original hash so that caller can keep writing - // and summing. - dup := d.clone() - hash := make([]byte, dup.outputLen) - dup.Read(hash) - return append(in, hash...) -} diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go deleted file mode 100644 index b455e65..0000000 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
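Because Sum above operates on a clone of the sponge, a caller can keep writing after summing; a small illustrative sketch (import path golang.org/x/crypto/sha3 assumed, messages arbitrary):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.New256()

	h.Write([]byte("hello "))
	first := h.Sum(nil) // digest of "hello "; the sponge itself is left untouched

	h.Write([]byte("world"))
	second := h.Sum(nil) // digest of "hello world"

	fmt.Printf("%x\n%x\n", first, second)
}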
- -package sha3 - -// This file defines the ShakeHash interface, and provides -// functions for creating SHAKE and cSHAKE instances, as well as utility -// functions for hashing bytes to arbitrary-length output. -// -// -// SHAKE implementation is based on FIPS PUB 202 [1] -// cSHAKE implementations is based on NIST SP 800-185 [2] -// -// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf -// [2] https://doi.org/10.6028/NIST.SP.800-185 - -import ( - "encoding/binary" - "io" -) - -// ShakeHash defines the interface to hash functions that -// support arbitrary-length output. -type ShakeHash interface { - // Write absorbs more data into the hash's state. It panics if input is - // written to it after output has been read from it. - io.Writer - - // Read reads more output from the hash; reading affects the hash's - // state. (ShakeHash.Read is thus very different from Hash.Sum) - // It never returns an error. - io.Reader - - // Clone returns a copy of the ShakeHash in its current state. - Clone() ShakeHash - - // Reset resets the ShakeHash to its initial state. - Reset() -} - -// cSHAKE specific context -type cshakeState struct { - *state // SHA-3 state context and Read/Write operations - - // initBlock is the cSHAKE specific initialization set of bytes. It is initialized - // by newCShake function and stores concatenation of N followed by S, encoded - // by the method specified in 3.3 of [1]. - // It is stored here in order for Reset() to be able to put context into - // initial state. - initBlock []byte -} - -// Consts for configuring initial SHA-3 state -const ( - dsbyteShake = 0x1f - dsbyteCShake = 0x04 - rate128 = 168 - rate256 = 136 -) - -func bytepad(input []byte, w int) []byte { - // leftEncode always returns max 9 bytes - buf := make([]byte, 0, 9+len(input)+w) - buf = append(buf, leftEncode(uint64(w))...) - buf = append(buf, input...) - padlen := w - (len(buf) % w) - return append(buf, make([]byte, padlen)...) -} - -func leftEncode(value uint64) []byte { - var b [9]byte - binary.BigEndian.PutUint64(b[1:], value) - // Trim all but last leading zero bytes - i := byte(1) - for i < 8 && b[i] == 0 { - i++ - } - // Prepend number of encoded bytes - b[i-1] = 9 - i - return b[i-1:] -} - -func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash { - c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}} - - // leftEncode returns max 9 bytes - c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) - c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) - c.initBlock = append(c.initBlock, S...) - c.Write(bytepad(c.initBlock, c.rate)) - return &c -} - -// Reset resets the hash to initial state. -func (c *cshakeState) Reset() { - c.state.Reset() - c.Write(bytepad(c.initBlock, c.rate)) -} - -// Clone returns copy of a cSHAKE context within its current state. -func (c *cshakeState) Clone() ShakeHash { - b := make([]byte, len(c.initBlock)) - copy(b, c.initBlock) - return &cshakeState{state: c.clone(), initBlock: b} -} - -// Clone returns copy of SHAKE context within its current state. -func (c *state) Clone() ShakeHash { - return c.clone() -} - -// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. -// Its generic security strength is 128 bits against all attacks if at -// least 32 bytes of its output are used. 
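The leftEncode and bytepad helpers above implement the length encodings from NIST SP 800-185. Tracing the code, leftEncode(168), with 168 being the cSHAKE128 rate passed to bytepad, comes out as the two bytes 0x01 0xa8: one byte giving the length of the encoding, then the big-endian value. A standalone sketch that mirrors the helper:

package main

import (
	"encoding/binary"
	"fmt"
)

// leftEncode mirrors the helper above: the value in big-endian order with
// leading zero bytes trimmed, prefixed by the count of remaining bytes.
func leftEncode(value uint64) []byte {
	var b [9]byte
	binary.BigEndian.PutUint64(b[1:], value)
	i := byte(1)
	for i < 8 && b[i] == 0 {
		i++
	}
	b[i-1] = 9 - i
	return b[i-1:]
}

func main() {
	// 168 is the cSHAKE128 rate used as the bytepad width.
	fmt.Printf("% x\n", leftEncode(168)) // expected: 01 a8
}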
-func NewShake128() ShakeHash { - return &state{rate: rate128, dsbyte: dsbyteShake} -} - -// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. -// Its generic security strength is 256 bits against all attacks if -// at least 64 bytes of its output are used. -func NewShake256() ShakeHash { - return &state{rate: rate256, dsbyte: dsbyteShake} -} - -// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, -// a customizable variant of SHAKE128. -// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is -// desired. S is a customization byte string used for domain separation - two cSHAKE -// computations on same input with different S yield unrelated outputs. -// When N and S are both empty, this is equivalent to NewShake128. -func NewCShake128(N, S []byte) ShakeHash { - if len(N) == 0 && len(S) == 0 { - return NewShake128() - } - return newCShake(N, S, rate128, dsbyteCShake) -} - -// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, -// a customizable variant of SHAKE256. -// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is -// desired. S is a customization byte string used for domain separation - two cSHAKE -// computations on same input with different S yield unrelated outputs. -// When N and S are both empty, this is equivalent to NewShake256. -func NewCShake256(N, S []byte) ShakeHash { - if len(N) == 0 && len(S) == 0 { - return NewShake256() - } - return newCShake(N, S, rate256, dsbyteCShake) -} - -// ShakeSum128 writes an arbitrary-length digest of data into hash. -func ShakeSum128(hash, data []byte) { - h := NewShake128() - h.Write(data) - h.Read(hash) -} - -// ShakeSum256 writes an arbitrary-length digest of data into hash. -func ShakeSum256(hash, data []byte) { - h := NewShake256() - h.Write(data) - h.Read(hash) -} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go deleted file mode 100644 index 61be607..0000000 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate]byte - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(b) -} - -var ( - xorIn = xorInGeneric - copyOut = copyOutGeneric - xorInUnaligned = xorInGeneric - copyOutUnaligned = copyOutGeneric -) - -const xorImplementationUnaligned = "generic" diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go deleted file mode 100644 index 8d94771..0000000 --- a/vendor/golang.org/x/crypto/sha3/xor_generic.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import "encoding/binary" - -// xorInGeneric xors the bytes in buf into the state; it -// makes no non-portable assumptions about memory layout -// or alignment. -func xorInGeneric(d *state, buf []byte) { - n := len(buf) / 8 - - for i := 0; i < n; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } -} - -// copyOutGeneric copies uint64s to a byte buffer. 
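A short usage sketch for the SHAKE and cSHAKE constructors above (import path golang.org/x/crypto/sha3 assumed; inputs and customization strings are illustrative): the caller chooses the output length via Read, and different customization strings yield unrelated streams.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	msg := []byte("some input")

	// SHAKE256: squeeze as many output bytes as needed.
	h := sha3.NewShake256()
	h.Write(msg)
	out := make([]byte, 64)
	h.Read(out)

	// The ShakeSum256 helper does the same in one call.
	sum := make([]byte, 64)
	sha3.ShakeSum256(sum, msg)
	fmt.Println(bytes.Equal(out, sum)) // expected: true

	// cSHAKE256 with different customization strings gives unrelated outputs.
	a := sha3.NewCShake256(nil, []byte("context A"))
	b := sha3.NewCShake256(nil, []byte("context B"))
	a.Write(msg)
	b.Write(msg)
	outA := make([]byte, 32)
	outB := make([]byte, 32)
	a.Read(outA)
	b.Read(outB)
	fmt.Println(bytes.Equal(outA, outB)) // expected: false
}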
-func copyOutGeneric(d *state, b []byte) { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } -} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. 
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 4c0850a..0000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -package errgroup - -import ( - "context" - "fmt" - "sync" -) - -type token struct{} - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid, has no limit on the number of active goroutines, -// and does not cancel on error. -type Group struct { - cancel func() - - wg sync.WaitGroup - - sem chan token - - errOnce sync.Once - err error -} - -func (g *Group) done() { - if g.sem != nil { - <-g.sem - } - g.wg.Done() -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel() - } - return g.err -} - -// Go calls the given function in a new goroutine. -// It blocks until the new goroutine can be added without the number of -// active goroutines in the group exceeding the configured limit. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (g *Group) Go(f func() error) { - if g.sem != nil { - g.sem <- token{} - } - - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() -} - -// TryGo calls the given function in a new goroutine only if the number of -// active goroutines in the group is currently below the configured limit. -// -// The return value reports whether the goroutine was started. -func (g *Group) TryGo(f func() error) bool { - if g.sem != nil { - select { - case g.sem <- token{}: - // Note: this allows barging iff channels in general allow barging. - default: - return false - } - } - - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() - return true -} - -// SetLimit limits the number of active goroutines in this group to at most n. -// A negative value indicates no limit. -// -// Any subsequent call to the Go method will block until it can add an active -// goroutine without exceeding the configured limit. -// -// The limit must not be modified while any goroutines in the group are active. 
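A compact usage sketch for the Group API above (golang.org/x/sync/errgroup; the task bodies are illustrative): WithContext cancels the derived context on the first failure, SetLimit bounds concurrency, and Wait returns the first non-nil error.

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(2) // at most two tasks run concurrently; set before any Go call

	for i := 0; i < 5; i++ {
		i := i // capture the loop variable for the goroutine (pre-Go 1.22 semantics)
		g.Go(func() error {
			select {
			case <-ctx.Done():
				// Another task already failed; give up early.
				return ctx.Err()
			default:
			}
			if i == 3 {
				return errors.New("task 3 failed")
			}
			return nil
		})
	}

	// Wait blocks for all tasks and reports the first non-nil error, if any.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}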
-func (g *Group) SetLimit(n int) { - if n < 0 { - g.sem = nil - return - } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) - } - g.sem = make(chan token, n) -} diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE deleted file mode 100644 index e4a47e1..0000000 --- a/vendor/golang.org/x/xerrors/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/xerrors/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. 
diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README deleted file mode 100644 index aac7867..0000000 --- a/vendor/golang.org/x/xerrors/README +++ /dev/null @@ -1,2 +0,0 @@ -This repository holds the transition packages for the new Go 1.13 error values. -See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go deleted file mode 100644 index 4317f24..0000000 --- a/vendor/golang.org/x/xerrors/adaptor.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strconv" -) - -// FormatError calls the FormatError method of f with an errors.Printer -// configured according to s and verb, and writes the result to s. -func FormatError(f Formatter, s fmt.State, verb rune) { - // Assuming this function is only called from the Format method, and given - // that FormatError takes precedence over Format, it cannot be called from - // any package that supports errors.Formatter. It is therefore safe to - // disregard that State may be a specific printer implementation and use one - // of our choice instead. - - // limitations: does not support printing error as Go struct. - - var ( - sep = " " // separator before next error - p = &state{State: s} - direct = true - ) - - var err error = f - - switch verb { - // Note that this switch must match the preference order - // for ordinary string printing (%#v before %+v, and so on). - - case 'v': - if s.Flag('#') { - if stringer, ok := err.(fmt.GoStringer); ok { - io.WriteString(&p.buf, stringer.GoString()) - goto exit - } - // proceed as if it were %v - } else if s.Flag('+') { - p.printDetail = true - sep = "\n - " - } - case 's': - case 'q', 'x', 'X': - // Use an intermediate buffer in the rare cases that precision, - // truncation, or one of the alternative verbs (q, x, and X) are - // specified. - direct = false - - default: - p.buf.WriteString("%!") - p.buf.WriteRune(verb) - p.buf.WriteByte('(') - switch { - case err != nil: - p.buf.WriteString(reflect.TypeOf(f).String()) - default: - p.buf.WriteString("") - } - p.buf.WriteByte(')') - io.Copy(s, &p.buf) - return - } - -loop: - for { - switch v := err.(type) { - case Formatter: - err = v.FormatError((*printer)(p)) - case fmt.Formatter: - v.Format(p, 'v') - break loop - default: - io.WriteString(&p.buf, v.Error()) - break loop - } - if err == nil { - break - } - if p.needColon || !p.printDetail { - p.buf.WriteByte(':') - p.needColon = false - } - p.buf.WriteString(sep) - p.inDetail = false - p.needNewline = false - } - -exit: - width, okW := s.Width() - prec, okP := s.Precision() - - if !direct || (okW && width > 0) || okP { - // Construct format string from State s. - format := []byte{'%'} - if s.Flag('-') { - format = append(format, '-') - } - if s.Flag('+') { - format = append(format, '+') - } - if s.Flag(' ') { - format = append(format, ' ') - } - if okW { - format = strconv.AppendInt(format, int64(width), 10) - } - if okP { - format = append(format, '.') - format = strconv.AppendInt(format, int64(prec), 10) - } - format = append(format, string(verb)...) - fmt.Fprintf(s, string(format), p.buf.String()) - } else { - io.Copy(s, &p.buf) - } -} - -var detailSep = []byte("\n ") - -// state tracks error printing state. It implements fmt.State. 
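FormatError above is the adaptor that lets a custom error type reuse this printing logic from its own fmt.Formatter method. A hypothetical error type wired up that way (the type name, message, and fields are illustrative only):

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

// queryError is a hypothetical error type that opts into xerrors detail printing.
type queryError struct {
	query string
	err   error
	frame xerrors.Frame
}

func (e *queryError) Error() string { return fmt.Sprint(e) }

// Format delegates to xerrors.FormatError so %v, %s and %+v all work.
func (e *queryError) Format(s fmt.State, v rune) { xerrors.FormatError(e, s, v) }

// FormatError prints the one-line message, adds detail only when requested,
// and returns the wrapped error so the rest of the chain keeps printing.
func (e *queryError) FormatError(p xerrors.Printer) (next error) {
	p.Print("query failed")
	if p.Detail() {
		p.Printf("query: %s\n", e.query)
	}
	e.frame.Format(p)
	return e.err
}

func main() {
	err := &queryError{
		query: "SELECT 1",
		err:   xerrors.New("connection reset"),
		frame: xerrors.Caller(0),
	}
	fmt.Printf("%v\n", err)  // one-line form
	fmt.Printf("%+v\n", err) // multi-line form with detail and call frames
}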
-type state struct { - fmt.State - buf bytes.Buffer - - printDetail bool - inDetail bool - needColon bool - needNewline bool -} - -func (s *state) Write(b []byte) (n int, err error) { - if s.printDetail { - if len(b) == 0 { - return 0, nil - } - if s.inDetail && s.needColon { - s.needNewline = true - if b[0] == '\n' { - b = b[1:] - } - } - k := 0 - for i, c := range b { - if s.needNewline { - if s.inDetail && s.needColon { - s.buf.WriteByte(':') - s.needColon = false - } - s.buf.Write(detailSep) - s.needNewline = false - } - if c == '\n' { - s.buf.Write(b[k:i]) - k = i + 1 - s.needNewline = true - } - } - s.buf.Write(b[k:]) - if !s.inDetail { - s.needColon = true - } - } else if !s.inDetail { - s.buf.Write(b) - } - return len(b), nil -} - -// printer wraps a state to implement an xerrors.Printer. -type printer state - -func (s *printer) Print(args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprint((*state)(s), args...) - } -} - -func (s *printer) Printf(format string, args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprintf((*state)(s), format, args...) - } -} - -func (s *printer) Detail() bool { - s.inDetail = true - return s.printDetail -} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg deleted file mode 100644 index 3f8b14b..0000000 --- a/vendor/golang.org/x/xerrors/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go deleted file mode 100644 index 2ef99f5..0000000 --- a/vendor/golang.org/x/xerrors/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xerrors implements functions to manipulate errors. -// -// This package is based on the Go 2 proposal for error values: -// -// https://golang.org/design/29934-error-values -// -// These functions were incorporated into the standard library's errors package -// in Go 1.13: -// - Is -// - As -// - Unwrap -// -// Also, Errorf's %w verb was incorporated into fmt.Errorf. -// -// Use this package to get equivalent behavior in all supported Go versions. -// -// No other features of this package were included in Go 1.13, and at present -// there are no plans to include any of them. -package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go deleted file mode 100644 index e88d377..0000000 --- a/vendor/golang.org/x/xerrors/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import "fmt" - -// errorString is a trivial implementation of error. -type errorString struct { - s string - frame Frame -} - -// New returns an error that formats as the given text. -// -// The returned error contains a Frame set to the caller's location and -// implements Formatter to show this information when printed with details. 
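As the comment above notes, an error from New records a Frame and prints it when extra detail is requested; a minimal sketch:

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

func main() {
	err := xerrors.New("permission denied")

	fmt.Printf("%v\n", err)  // just the message: permission denied
	fmt.Printf("%+v\n", err) // the message plus the recording function and file:line
}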
-func New(text string) error { - return &errorString{text, Caller(1)} -} - -func (e *errorString) Error() string { - return e.s -} - -func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *errorString) FormatError(p Printer) (next error) { - p.Print(e.s) - e.frame.Format(p) - return nil -} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go deleted file mode 100644 index 6df1866..0000000 --- a/vendor/golang.org/x/xerrors/fmt.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/xerrors/internal" -) - -const percentBangString = "%!" - -// Errorf formats according to a format specifier and returns the string as a -// value that satisfies error. -// -// The returned error includes the file and line number of the caller when -// formatted with additional detail enabled. If the last argument is an error -// the returned error's Format method will return it if the format string ends -// with ": %s", ": %v", or ": %w". If the last argument is an error and the -// format string ends with ": %w", the returned error implements an Unwrap -// method returning it. -// -// If the format specifier includes a %w verb with an error operand in a -// position other than at the end, the returned error will still implement an -// Unwrap method returning the operand, but the error's Format method will not -// return the wrapped error. -// -// It is invalid to include more than one %w verb or to supply it with an -// operand that does not implement the error interface. The %w verb is otherwise -// a synonym for %v. -// -// Deprecated: As of Go 1.13, use fmt.Errorf instead. -func Errorf(format string, a ...interface{}) error { - format = formatPlusW(format) - // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. - wrap := strings.HasSuffix(format, ": %w") - idx, format2, ok := parsePercentW(format) - percentWElsewhere := !wrap && idx >= 0 - if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { - err := errorAt(a, len(a)-1) - if err == nil { - return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} - } - // TODO: this is not entirely correct. The error value could be - // printed elsewhere in format if it mixes numbered with unnumbered - // substitutions. With relatively small changes to doPrintf we can - // have it optionally ignore extra arguments and pass the argument - // list in its entirety. - msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - if wrap { - return &wrapError{msg, err, frame} - } - return &noWrapError{msg, err, frame} - } - // Support %w anywhere. - // TODO: don't repeat the wrapped error's message when %w occurs in the middle. - msg := fmt.Sprintf(format2, a...) - if idx < 0 { - return &noWrapError{msg, nil, Caller(1)} - } - err := errorAt(a, idx) - if !ok || err == nil { - // Too many %ws or argument of %w is not an error. Approximate the Go - // 1.13 fmt.Errorf message. 
- return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} - } - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - return &wrapError{msg, err, frame} -} - -func errorAt(args []interface{}, i int) error { - if i < 0 || i >= len(args) { - return nil - } - err, ok := args[i].(error) - if !ok { - return nil - } - return err -} - -// formatPlusW is used to avoid the vet check that will barf at %w. -func formatPlusW(s string) string { - return s -} - -// Return the index of the only %w in format, or -1 if none. -// Also return a rewritten format string with %w replaced by %v, and -// false if there is more than one %w. -// TODO: handle "%[N]w". -func parsePercentW(format string) (idx int, newFormat string, ok bool) { - // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. - idx = -1 - ok = true - n := 0 - sz := 0 - var isW bool - for i := 0; i < len(format); i += sz { - if format[i] != '%' { - sz = 1 - continue - } - // "%%" is not a format directive. - if i+1 < len(format) && format[i+1] == '%' { - sz = 2 - continue - } - sz, isW = parsePrintfVerb(format[i:]) - if isW { - if idx >= 0 { - ok = false - } else { - idx = n - } - // "Replace" the last character, the 'w', with a 'v'. - p := i + sz - 1 - format = format[:p] + "v" + format[p+1:] - } - n++ - } - return idx, format, ok -} - -// Parse the printf verb starting with a % at s[0]. -// Return how many bytes it occupies and whether the verb is 'w'. -func parsePrintfVerb(s string) (int, bool) { - // Assume only that the directive is a sequence of non-letters followed by a single letter. - sz := 0 - var r rune - for i := 1; i < len(s); i += sz { - r, sz = utf8.DecodeRuneInString(s[i:]) - if unicode.IsLetter(r) { - return i + sz, r == 'w' - } - } - return len(s), false -} - -type noWrapError struct { - msg string - err error - frame Frame -} - -func (e *noWrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *noWrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -type wrapError struct { - msg string - err error - frame Frame -} - -func (e *wrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *wrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -func (e *wrapError) Unwrap() error { - return e.err -} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go deleted file mode 100644 index 1bc9c26..0000000 --- a/vendor/golang.org/x/xerrors/format.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -// A Formatter formats error messages. -type Formatter interface { - error - - // FormatError prints the receiver's first error and returns the next error in - // the error chain, if any. - FormatError(p Printer) (next error) -} - -// A Printer formats error messages. -// -// The most common implementation of Printer is the one provided by package fmt -// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message -// typically provide their own implementations. -type Printer interface { - // Print appends args to the message output. 
- Print(args ...interface{}) - - // Printf writes a formatted string. - Printf(format string, args ...interface{}) - - // Detail reports whether error detail is requested. - // After the first call to Detail, all text written to the Printer - // is formatted as additional detail, or ignored when - // detail has not been requested. - // If Detail returns false, the caller can avoid printing the detail at all. - Detail() bool -} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go deleted file mode 100644 index 0de628e..0000000 --- a/vendor/golang.org/x/xerrors/frame.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "runtime" -) - -// A Frame contains part of a call stack. -type Frame struct { - // Make room for three PCs: the one we were asked for, what it called, - // and possibly a PC for skipPleaseUseCallersFrames. See: - // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 - frames [3]uintptr -} - -// Caller returns a Frame that describes a frame on the caller's stack. -// The argument skip is the number of frames to skip over. -// Caller(0) returns the frame for the caller of Caller. -func Caller(skip int) Frame { - var s Frame - runtime.Callers(skip+1, s.frames[:]) - return s -} - -// location reports the file, line, and function of a frame. -// -// The returned function may be "" even if file and line are not. -func (f Frame) location() (function, file string, line int) { - frames := runtime.CallersFrames(f.frames[:]) - if _, ok := frames.Next(); !ok { - return "", "", 0 - } - fr, ok := frames.Next() - if !ok { - return "", "", 0 - } - return fr.Function, fr.File, fr.Line -} - -// Format prints the stack as error detail. -// It should be called from an error's Format implementation -// after printing any other error detail. -func (f Frame) Format(p Printer) { - if p.Detail() { - function, file, line := f.location() - if function != "" { - p.Printf("%s\n ", function) - } - if file != "" { - p.Printf("%s:%d\n", file, line) - } - } -} diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go deleted file mode 100644 index 89f4eca..0000000 --- a/vendor/golang.org/x/xerrors/internal/internal.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// EnableTrace indicates whether stack information should be recorded in errors. -var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go deleted file mode 100644 index 9842758..0000000 --- a/vendor/golang.org/x/xerrors/wrap.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "reflect" -) - -// A Wrapper provides context around another error. -type Wrapper interface { - // Unwrap returns the next error in the error chain. - // If there is no next error, Unwrap returns nil. - Unwrap() error -} - -// Opaque returns an error with the same error formatting as err -// but that does not match err and cannot be unwrapped. 
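Tying Errorf's ": %w" handling to the Wrapper interface above: wrapping with a trailing %w yields an error whose Unwrap method returns the operand. A small sketch (the file name is illustrative; the exact underlying message is platform dependent):

package main

import (
	"fmt"
	"os"

	"golang.org/x/xerrors"
)

func main() {
	_, err := os.Open("missing.txt") // some underlying failure
	wrapped := xerrors.Errorf("loading config: %w", err)

	fmt.Println(wrapped)                 // loading config: open missing.txt: ...
	fmt.Println(xerrors.Unwrap(wrapped)) // the original *os.PathError
}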
-func Opaque(err error) error { - return noWrapper{err} -} - -type noWrapper struct { - error -} - -func (e noWrapper) FormatError(p Printer) (next error) { - if f, ok := e.error.(Formatter); ok { - return f.FormatError(p) - } - p.Print(e.error) - return nil -} - -// Unwrap returns the result of calling the Unwrap method on err, if err implements -// Unwrap. Otherwise, Unwrap returns nil. -// -// Deprecated: As of Go 1.13, use errors.Unwrap instead. -func Unwrap(err error) error { - u, ok := err.(Wrapper) - if !ok { - return nil - } - return u.Unwrap() -} - -// Is reports whether any error in err's chain matches target. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -// -// Deprecated: As of Go 1.13, use errors.Is instead. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - // TODO: consider supporing target.Is(err). This would allow - // user-definable predicates, but also may allow for coping with sloppy - // APIs, thereby making it easier to get away with them. - if err = Unwrap(err); err == nil { - return false - } - } -} - -// As finds the first error in err's chain that matches the type to which target -// points, and if so, sets the target to its value and returns true. An error -// matches a type if it is assignable to the target type, or if it has a method -// As(interface{}) bool such that As(target) returns true. As will panic if target -// is not a non-nil pointer to a type which implements error or is of interface type. -// -// The As method should set the target to its value and return true if err -// matches the type to which target points. -// -// Deprecated: As of Go 1.13, use errors.As instead. 
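A sketch of Is and As walking an Errorf-wrapped chain (os.ErrNotExist and *os.PathError come from the standard library; the file name is illustrative):

package main

import (
	"fmt"
	"os"

	"golang.org/x/xerrors"
)

func main() {
	_, err := os.Open("missing.txt")
	err = xerrors.Errorf("loading config: %w", err)

	// Is reports whether any error in the chain matches the sentinel.
	fmt.Println(xerrors.Is(err, os.ErrNotExist)) // expected: true

	// As finds the first error in the chain assignable to *os.PathError.
	var pathErr *os.PathError
	if xerrors.As(err, &pathErr) {
		fmt.Println("failing path:", pathErr.Path) // expected: missing.txt
	}
}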
-func As(err error, target interface{}) bool { - if target == nil { - panic("errors: target cannot be nil") - } - val := reflect.ValueOf(target) - typ := val.Type() - if typ.Kind() != reflect.Ptr || val.IsNil() { - panic("errors: target must be a non-nil pointer") - } - if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { - panic("errors: *target must be interface or implement error") - } - targetType := typ.Elem() - for err != nil { - if reflect.TypeOf(err).AssignableTo(targetType) { - val.Elem().Set(reflect.ValueOf(err)) - return true - } - if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { - return true - } - err = Unwrap(err) - } - return false -} - -var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/modules.txt b/vendor/modules.txt deleted file mode 100644 index a1357ca..0000000 --- a/vendor/modules.txt +++ /dev/null @@ -1,145 +0,0 @@ -# github.com/davecgh/go-spew v1.1.1 => github.com/ipfs-force-community/go-spew v1.1.2-0.20220524052205-0034150c051a -## explicit -github.com/davecgh/go-spew/spew -# github.com/filecoin-project/go-address v0.0.6 => github.com/ipfs-force-community/go-address v0.0.7-0.20220524010936-42617a156be1 -## explicit; go 1.13 -github.com/filecoin-project/go-address -# github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 -## explicit; go 1.16 -github.com/filecoin-project/go-amt-ipld/v4 -github.com/filecoin-project/go-amt-ipld/v4/internal -# github.com/filecoin-project/go-bitfield v0.2.4 -## explicit; go 1.13 -github.com/filecoin-project/go-bitfield -github.com/filecoin-project/go-bitfield/rle -# github.com/filecoin-project/go-commp-utils v0.1.3 -## explicit; go 1.15 -github.com/filecoin-project/go-commp-utils/zerocomm -# github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 -## explicit; go 1.17 -github.com/filecoin-project/go-commp-utils/nonffi -# github.com/filecoin-project/go-fil-commcid v0.1.0 -## explicit; go 1.13 -github.com/filecoin-project/go-fil-commcid -# github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 -## explicit; go 1.15 -github.com/filecoin-project/go-hamt-ipld/v3 -# github.com/filecoin-project/go-state-types v0.1.12-alpha -## explicit; go 1.13 -github.com/filecoin-project/go-state-types/abi -github.com/filecoin-project/go-state-types/big -github.com/filecoin-project/go-state-types/builtin -github.com/filecoin-project/go-state-types/builtin/v8/market -github.com/filecoin-project/go-state-types/builtin/v8/miner -github.com/filecoin-project/go-state-types/builtin/v8/power -github.com/filecoin-project/go-state-types/builtin/v8/system -github.com/filecoin-project/go-state-types/builtin/v8/util/adt -github.com/filecoin-project/go-state-types/builtin/v8/util/math -github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing -github.com/filecoin-project/go-state-types/builtin/v9/migration -github.com/filecoin-project/go-state-types/builtin/v9/miner -github.com/filecoin-project/go-state-types/builtin/v9/power -github.com/filecoin-project/go-state-types/builtin/v9/util/adt -github.com/filecoin-project/go-state-types/builtin/v9/util/math -github.com/filecoin-project/go-state-types/builtin/v9/util/smoothing -github.com/filecoin-project/go-state-types/cbor -github.com/filecoin-project/go-state-types/crypto -github.com/filecoin-project/go-state-types/dline -github.com/filecoin-project/go-state-types/exitcode -github.com/filecoin-project/go-state-types/manifest -github.com/filecoin-project/go-state-types/network 
-github.com/filecoin-project/go-state-types/proof -github.com/filecoin-project/go-state-types/rt -# github.com/filecoin-project/specs-actors v0.9.13 -## explicit; go 1.13 -github.com/filecoin-project/specs-actors/actors/runtime -github.com/filecoin-project/specs-actors/actors/runtime/proof -# github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb -## explicit; go 1.13 -github.com/filecoin-project/specs-actors/v2/actors/runtime/proof -# github.com/filecoin-project/specs-actors/v5 v5.0.4 -## explicit; go 1.16 -github.com/filecoin-project/specs-actors/v5/actors/runtime/proof -# github.com/filecoin-project/specs-actors/v7 v7.0.0 -## explicit; go 1.17 -github.com/filecoin-project/specs-actors/v7/actors/runtime -github.com/filecoin-project/specs-actors/v7/actors/runtime/proof -# github.com/ipfs/go-block-format v0.0.3 => github.com/ipfs-force-community/go-block-format v0.0.4-0.20220425095807-073e9266335c -## explicit; go 1.17 -github.com/ipfs/go-block-format -# github.com/ipfs/go-cid v0.2.0 -## explicit; go 1.17 -github.com/ipfs/go-cid -# github.com/ipfs/go-ipld-cbor v0.0.6 => github.com/ipfs-force-community/go-ipld-cbor v0.0.7-0.20220713070731-f5190aacb1a4 -## explicit; go 1.17 -github.com/ipfs/go-ipld-cbor -# github.com/klauspost/cpuid/v2 v2.0.6 => github.com/ipfs-force-community/cpuid/v2 v2.0.13-0.20220523085810-ac111993ce74 -## explicit; go 1.15 -github.com/klauspost/cpuid/v2 -# github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 => github.com/ipfs-force-community/blake2b-simd v0.0.0-20220523083450-6e9a68832d69 -## explicit -github.com/minio/blake2b-simd -# github.com/minio/sha256-simd v1.0.0 => github.com/ipfs-force-community/sha256-simd v1.0.1-0.20220421100150-fcbba4b6ea96 -## explicit; go 1.13 -github.com/minio/sha256-simd -# github.com/mr-tron/base58 v1.2.0 -## explicit; go 1.12 -github.com/mr-tron/base58/base58 -# github.com/multiformats/go-base32 v0.0.3 -## explicit -github.com/multiformats/go-base32 -# github.com/multiformats/go-base36 v0.1.0 -## explicit; go 1.11 -github.com/multiformats/go-base36 -# github.com/multiformats/go-multibase v0.0.3 -## explicit; go 1.11 -github.com/multiformats/go-multibase -# github.com/multiformats/go-multihash v0.0.15 -## explicit; go 1.13 -github.com/multiformats/go-multihash -github.com/multiformats/go-multihash/core -github.com/multiformats/go-multihash/register/all -github.com/multiformats/go-multihash/register/blake2 -github.com/multiformats/go-multihash/register/miniosha256 -github.com/multiformats/go-multihash/register/sha3 -# github.com/multiformats/go-varint v0.0.6 -## explicit; go 1.12 -github.com/multiformats/go-varint -# github.com/pmezard/go-difflib v1.0.0 -## explicit -github.com/pmezard/go-difflib/difflib -# github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a => github.com/hunjixin/refmt v0.0.0-20220520091210-cb3c7d292019 -## explicit -github.com/polydawn/refmt/obj/atlas -# github.com/spaolacci/murmur3 v1.1.0 -## explicit -github.com/spaolacci/murmur3 -# github.com/stretchr/testify v1.7.0 => github.com/ipfs-force-community/testify v1.7.1-0.20220616060316-ea4f53121ac3 -## explicit; go 1.13 -github.com/stretchr/testify/assert -# github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799 => github.com/ipfs-force-community/cbor-gen v0.0.0-20220421100448-dc345220256c -## explicit; go 1.12 -github.com/whyrusleeping/cbor-gen -# golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf => github.com/ipfs-force-community/crypto v0.0.0-20220523090957-2aff239c26f7 -## explicit; go 1.17 
-golang.org/x/crypto/blake2s -golang.org/x/crypto/sha3 -# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 -## explicit -golang.org/x/sync/errgroup -# golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f -## explicit; go 1.11 -golang.org/x/xerrors -golang.org/x/xerrors/internal -# github.com/davecgh/go-spew => github.com/ipfs-force-community/go-spew v1.1.2-0.20220524052205-0034150c051a -# github.com/filecoin-project/go-address => github.com/ipfs-force-community/go-address v0.0.7-0.20220524010936-42617a156be1 -# github.com/ipfs/go-block-format => github.com/ipfs-force-community/go-block-format v0.0.4-0.20220425095807-073e9266335c -# github.com/ipfs/go-ipld-cbor => github.com/ipfs-force-community/go-ipld-cbor v0.0.7-0.20220713070731-f5190aacb1a4 -# github.com/klauspost/cpuid/v2 => github.com/ipfs-force-community/cpuid/v2 v2.0.13-0.20220523085810-ac111993ce74 -# github.com/minio/blake2b-simd => github.com/ipfs-force-community/blake2b-simd v0.0.0-20220523083450-6e9a68832d69 -# github.com/minio/sha256-simd => github.com/ipfs-force-community/sha256-simd v1.0.1-0.20220421100150-fcbba4b6ea96 -# github.com/polydawn/refmt => github.com/hunjixin/refmt v0.0.0-20220520091210-cb3c7d292019 -# github.com/stretchr/testify => github.com/ipfs-force-community/testify v1.7.1-0.20220616060316-ea4f53121ac3 -# github.com/whyrusleeping/cbor-gen => github.com/ipfs-force-community/cbor-gen v0.0.0-20220421100448-dc345220256c -# golang.org/x/crypto => github.com/ipfs-force-community/crypto v0.0.0-20220523090957-2aff239c26f7 -# lukechampine.com/blake3 => github.com/ipfs-force-community/blake3 v1.1.8-0.20220609024944-51450f2b2fc0
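The "# old => new version" lines at the end of modules.txt record the replace directives in effect in the main module's go.mod; go mod vendor copies them here so the vendor tree can be checked against go.mod. As a sketch, a few of the entries listed above expressed in go.mod replace syntax (only replacements already shown are included):

replace (
	github.com/davecgh/go-spew => github.com/ipfs-force-community/go-spew v1.1.2-0.20220524052205-0034150c051a
	golang.org/x/crypto => github.com/ipfs-force-community/crypto v0.0.0-20220523090957-2aff239c26f7
	lukechampine.com/blake3 => github.com/ipfs-force-community/blake3 v1.1.8-0.20220609024944-51450f2b2fc0
)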